[linux-next] LinuxNextTracking branch, master, updated. next-20140107

batman at open-mesh.org
Wed Jan 8 00:19:04 CET 2014


The following commit has been merged in the master branch:
commit 56a4342dfe3145cd66f766adccb28fd9b571606d
Merge: 805c1f4aedaba1bc8d839e7c27b128083dd5c2f0 fe0d692bbc645786bce1a98439e548ae619269f5
Author: David S. Miller <davem at davemloft.net>
Date:   Mon Jan 6 17:37:45 2014 -0500

    Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Conflicts:
    	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
    	net/ipv6/ip6_tunnel.c
    	net/ipv6/ip6_vti.c
    
    ipv6 tunnel statistic bug fixes conflicting with consolidation into
    generic sw per-cpu net stats.
    
    qlogic conflict between queue counting bug fix and the addition
    of multiple MAC address support.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>
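
For readers untangling the first conflict: net-next had just consolidated the
open-coded tunnel statistics into generic software per-cpu counters, so the
ipv6 tunnel bug fixes from net landed on moved code. A minimal sketch of the
consolidated pattern, assuming the 3.13-era kernel API (struct
pcpu_sw_netstats and the u64_stats_sync helpers; the function itself is
illustrative, not the actual tunnel code involved in the conflict):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* hypothetical RX accounting helper in the consolidated style */
static void example_tunnel_rx_stats(struct net_device *dev, unsigned int len)
{
	/* each CPU owns its own counter set, so writers never contend */
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);	/* seqcount guards 64-bit reads on 32-bit SMP */
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}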

diff --combined MAINTAINERS
index 23bd3c2,21c038f..e11d495
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -783,7 -783,7 +783,7 @@@ F:	arch/arm/boot/dts/sama*.dt
  F:	arch/arm/boot/dts/sama*.dtsi
  
  ARM/CALXEDA HIGHBANK ARCHITECTURE
- M:	Rob Herring <rob.herring at calxeda.com>
+ M:	Rob Herring <robh at kernel.org>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	arch/arm/mach-highbank/
@@@ -1008,6 -1008,8 +1008,8 @@@ M:	Santosh Shilimkar <santosh.shilimkar
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	arch/arm/mach-keystone/
+ F:	drivers/clk/keystone/
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
  
  ARM/LOGICPD PXA270 MACHINE SUPPORT
  M:	Lennert Buytenhek <kernel at wantstofly.org>
@@@ -1430,7 -1432,7 +1432,7 @@@ F:	Documentation/aoe
  F:	drivers/block/aoe/
  
  ATHEROS ATH GENERIC UTILITIES
 -M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
 +M:	"Luis R. Rodriguez" <mcgrof at do-not-panic.com>
  L:	linux-wireless at vger.kernel.org
  S:	Supported
  F:	drivers/net/wireless/ath/*
@@@ -1438,7 -1440,7 +1440,7 @@@
  ATHEROS ATH5K WIRELESS DRIVER
  M:	Jiri Slaby <jirislaby at gmail.com>
  M:	Nick Kossifidis <mickflemm at gmail.com>
 -M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
 +M:	"Luis R. Rodriguez" <mcgrof at do-not-panic.com>
  L:	linux-wireless at vger.kernel.org
  L:	ath5k-devel at lists.ath5k.org
  W:	http://wireless.kernel.org/en/users/Drivers/ath5k
@@@ -1453,6 -1455,17 +1455,6 @@@ T:	git git://github.com/kvalo/ath.gi
  S:	Supported
  F:	drivers/net/wireless/ath/ath6kl/
  
 -ATHEROS ATH9K WIRELESS DRIVER
 -M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
 -M:	Jouni Malinen <jouni at qca.qualcomm.com>
 -M:	Vasanthakumar Thiagarajan <vthiagar at qca.qualcomm.com>
 -M:	Senthil Balasubramanian <senthilb at qca.qualcomm.com>
 -L:	linux-wireless at vger.kernel.org
 -L:	ath9k-devel at lists.ath9k.org
 -W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 -S:	Supported
 -F:	drivers/net/wireless/ath/ath9k/
 -
  WILOCITY WIL6210 WIRELESS DRIVER
  M:	Vladimir Kondratiev <qca_vkondrat at qca.qualcomm.com>
  L:	linux-wireless at vger.kernel.org
@@@ -2008,7 -2021,6 +2010,7 @@@ L:	linux-can at vger.kernel.or
  W:	http://gitorious.org/linux-can
  T:	git git://gitorious.org/linux-can/linux-can-next.git
  S:	Maintained
 +F:	Documentation/networking/can.txt
  F:	net/can/
  F:	include/linux/can/core.h
  F:	include/uapi/linux/can.h
@@@ -3823,6 -3835,12 +3825,12 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/usb/gspca/
  
+ GUID PARTITION TABLE (GPT)
+ M:	Davidlohr Bueso <davidlohr at hp.com>
+ L:	linux-efi at vger.kernel.org
+ S:	Maintained
+ F:	block/partitions/efi.*
+ 
  STK1160 USB VIDEO CAPTURE DRIVER
  M:	Ezequiel Garcia <elezegarcia at gmail.com>
  L:	linux-media at vger.kernel.org
@@@ -4451,7 -4469,7 +4459,7 @@@ M:	Deepak Saxena <dsaxena at plexity.net
  S:	Maintained
  F:	drivers/char/hw_random/ixp4xx-rng.c
  
 -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
 +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
  M:	Jeff Kirsher <jeffrey.t.kirsher at intel.com>
  M:	Jesse Brandeburg <jesse.brandeburg at intel.com>
  M:	Bruce Allan <bruce.w.allan at intel.com>
@@@ -4460,7 -4478,6 +4468,7 @@@ M:	Don Skidmore <donald.c.skidmore at inte
  M:	Greg Rose <gregory.v.rose at intel.com>
  M:	Alex Duyck <alexander.h.duyck at intel.com>
  M:	John Ronciak <john.ronciak at intel.com>
 +M:	Mitch Williams <mitch.a.williams at intel.com>
  L:	e1000-devel at lists.sourceforge.net
  W:	http://www.intel.com/support/feedback.htm
  W:	http://e1000.sourceforge.net/
@@@ -4476,7 -4493,6 +4484,7 @@@ F:	Documentation/networking/ixgb.tx
  F:	Documentation/networking/ixgbe.txt
  F:	Documentation/networking/ixgbevf.txt
  F:	Documentation/networking/i40e.txt
 +F:	Documentation/networking/i40evf.txt
  F:	drivers/net/ethernet/intel/
  
  INTEL-MID GPIO DRIVER
@@@ -6240,7 -6256,7 +6248,7 @@@ F:	drivers/i2c/busses/i2c-ocores.
  
  OPEN FIRMWARE AND FLATTENED DEVICE TREE
  M:	Grant Likely <grant.likely at linaro.org>
- M:	Rob Herring <rob.herring at calxeda.com>
+ M:	Rob Herring <robh+dt at kernel.org>
  L:	devicetree at vger.kernel.org
  W:	http://fdt.secretlab.ca
  T:	git git://git.secretlab.ca/git/linux-2.6.git
@@@ -6252,7 -6268,7 +6260,7 @@@ K:	of_get_propert
  K:	of_match_table
  
  OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
- M:	Rob Herring <rob.herring at calxeda.com>
+ M:	Rob Herring <robh+dt at kernel.org>
  M:	Pawel Moll <pawel.moll at arm.com>
  M:	Mark Rutland <mark.rutland at arm.com>
  M:	Ian Campbell <ijc+devicetree at hellion.org.uk>
@@@ -6967,14 -6983,6 +6975,14 @@@ T:	git git://linuxtv.org/anttip/media_t
  S:	Maintained
  F:	drivers/media/tuners/qt1010*
  
 +QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 +M:	QCA ath9k Development <ath9k-devel at qca.qualcomm.com>
 +L:	linux-wireless at vger.kernel.org
 +L:	ath9k-devel at lists.ath9k.org
 +W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 +S:	Supported
 +F:	drivers/net/wireless/ath/ath9k/
 +
  QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
  M:	Kalle Valo <kvalo at qca.qualcomm.com>
  L:	ath10k at lists.infradead.org
@@@ -8615,11 -8623,12 +8623,11 @@@ S:	Maintaine
  F:	sound/soc/codecs/twl4030*
  
  TI WILINK WIRELESS DRIVERS
 -M:	Luciano Coelho <luca at coelho.fi>
  L:	linux-wireless at vger.kernel.org
  W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
  W:	http://wireless.kernel.org/en/users/Drivers/wl1251
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 -S:	Maintained
 +S:	Orphan
  F:	drivers/net/wireless/ti/
  F:	include/linux/wl12xx.h
  
@@@ -9581,7 -9590,7 +9589,7 @@@ F:	drivers/xen/*swiotlb
  
  XFS FILESYSTEM
  P:	Silicon Graphics Inc
- M:	Dave Chinner <dchinner at fromorbit.com>
+ M:	Dave Chinner <david at fromorbit.com>
  M:	Ben Myers <bpm at sgi.com>
  M:	xfs at oss.sgi.com
  L:	xfs at oss.sgi.com
diff --combined drivers/bluetooth/ath3k.c
index d3fdc32,dceb85f..106d1d8
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@@ -83,11 -83,11 +83,12 @@@ static const struct usb_device_id ath3k
  	{ USB_DEVICE(0x04CA, 0x3005) },
  	{ USB_DEVICE(0x04CA, 0x3006) },
  	{ USB_DEVICE(0x04CA, 0x3008) },
 +	{ USB_DEVICE(0x04CA, 0x300b) },
  	{ USB_DEVICE(0x13d3, 0x3362) },
  	{ USB_DEVICE(0x0CF3, 0xE004) },
  	{ USB_DEVICE(0x0CF3, 0xE005) },
  	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
  	{ USB_DEVICE(0x0489, 0xe057) },
  	{ USB_DEVICE(0x13d3, 0x3393) },
  	{ USB_DEVICE(0x0489, 0xe04e) },
@@@ -97,7 -97,6 +98,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402) },
  	{ USB_DEVICE(0x0cf3, 0x3121) },
  	{ USB_DEVICE(0x0cf3, 0xe003) },
 +	{ USB_DEVICE(0x0489, 0xe05f) },
  
  	/* Atheros AR5BBU12 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xE02C) },
@@@ -127,11 -126,11 +128,12 @@@ static const struct usb_device_id ath3k
  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@@ -141,7 -140,6 +143,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
  
  	/* Atheros AR5BBU22 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --combined drivers/bluetooth/btusb.c
index bfbcc5a,3980fd1..9f7e539
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@@ -150,11 -150,11 +150,12 @@@ static const struct usb_device_id black
  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@@ -164,7 -164,6 +165,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
  
  	/* Atheros AR5BBU12 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@@ -225,7 -224,6 +226,7 @@@
  
  	/* Intel Bluetooth device */
  	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 +	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
  
  	{ }	/* Terminating entry */
  };
@@@ -1438,10 -1436,8 +1439,10 @@@ static int btusb_probe(struct usb_inter
  	if (id->driver_info & BTUSB_BCM92035)
  		hdev->setup = btusb_setup_bcm92035;
  
 -	if (id->driver_info & BTUSB_INTEL)
 +	if (id->driver_info & BTUSB_INTEL) {
 +		usb_enable_autosuspend(data->udev);
  		hdev->setup = btusb_setup_intel;
 +	}
  
  	/* Interface numbers are hardcoded in the specification */
  	data->isoc = usb_ifnum_to_if(data->udev, 1);
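
The two Bluetooth hunks above follow a fixed pattern: each new AR3012-based
dongle (0x04ca:0x300b, 0x0930:0x0220, 0x0489:0xe05f) is listed both in
ath3k.c, which performs the firmware download, and in btusb.c's blacklist, so
btusb defers the device to ath3k instead of driving it directly. A hedged
sketch of the mechanics (BTUSB_ATH3012 is btusb's internal flag; the value
and the probe body are simplifications of what btusb actually does):

#include <linux/usb.h>

#define BTUSB_ATH3012	0x80	/* illustrative value for the sketch */

static const struct usb_device_id example_blacklist[] = {
	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
	{ }	/* terminating entry */
};

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	/* device still runs ROM firmware: bail out so ath3k can load the
	 * patch and sysconfig files first
	 */
	if (id->driver_info & BTUSB_ATH3012)
		return -ENODEV;

	/* ... normal btusb setup ... */
	return 0;
}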
diff --combined drivers/net/bonding/bond_3ad.c
index 81559b2,4ced594..539e24a
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@@ -90,9 -90,8 +90,9 @@@
  #define     AD_LINK_SPEED_BITMASK_10000MBPS   0x10
  //endalloun
  
 -// compare MAC addresses
 -#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
 +/* compare MAC addresses */
 +#define MAC_ADDRESS_EQUAL(A, B)	\
 +	ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
  
  static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
  static u16 ad_ticks_per_sec;
@@@ -148,12 -147,11 +148,12 @@@ static inline struct aggregator *__get_
  	struct bonding *bond = __get_bond_by_port(port);
  	struct slave *first_slave;
  
 -	// If there's no bond for this port, or bond has no slaves
 +	/* If there's no bond for this port, or bond has no slaves */
  	if (bond == NULL)
  		return NULL;
 -	first_slave = bond_first_slave(bond);
 -
 +	rcu_read_lock();
 +	first_slave = bond_first_slave_rcu(bond);
 +	rcu_read_unlock();
  	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
  }
  
@@@ -418,18 -416,17 +418,18 @@@ static u16 __ad_timer_to_ticks(u16 time
   */
  static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
  {
 -	// check if all parameters are alike
 +	/* check if all parameters are alike,
 +	 * or this is an individual link (aggregation == FALSE);
 +	 * if so, update the state machine Matched variable.
 +	 */
  	if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
  	     (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
 -	     !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
 +	     MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
  	     (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
  	     (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
  	     ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
 -	    // or this is individual link(aggregation == FALSE)
  	    ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
  		) {
 -		// update the state machine Matched variable
  		port->sm_vars |= AD_PORT_MATCHED;
  	} else {
  		port->sm_vars &= ~AD_PORT_MATCHED;
@@@ -509,15 -506,14 +509,15 @@@ static void __update_selected(struct la
  	if (lacpdu && port) {
  		const struct port_params *partner = &port->partner_oper;
  
 -		// check if any parameter is different
 +		/* check if any parameter is different; if so,
 +		 * update the state machine Selected variable.
 +		 */
  		if (ntohs(lacpdu->actor_port) != partner->port_number ||
  		    ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
 -		    MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
 +		    !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
  		    ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
  		    ntohs(lacpdu->actor_key) != partner->key ||
  		    (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
 -			// update the state machine Selected variable
  			port->sm_vars &= ~AD_PORT_SELECTED;
  		}
  	}
@@@ -541,16 -537,15 +541,16 @@@ static void __update_default_selected(s
  		const struct port_params *admin = &port->partner_admin;
  		const struct port_params *oper = &port->partner_oper;
  
 -		// check if any parameter is different
 +		/* check if any parameter is different; if so,
 +		 * update the state machine Selected variable.
 +		 */
  		if (admin->port_number != oper->port_number ||
  		    admin->port_priority != oper->port_priority ||
 -		    MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
 +		    !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
  		    admin->system_priority != oper->system_priority ||
  		    admin->key != oper->key ||
  		    (admin->port_state & AD_STATE_AGGREGATION)
  			!= (oper->port_state & AD_STATE_AGGREGATION)) {
 -			// update the state machine Selected variable
  			port->sm_vars &= ~AD_PORT_SELECTED;
  		}
  	}
@@@ -570,14 -565,12 +570,14 @@@
   */
  static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
  {
 -	// validate lacpdu and port
 +	/* validate lacpdu and port */
  	if (lacpdu && port) {
 -		// check if any parameter is different
 +		/* check if any parameter is different; if so,
 +		 * update port->ntt.
 +		 */
  		if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
  		    (ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
 -		    MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
 +		    !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
  		    (ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
  		    (ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
  		    ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@@ -585,6 -578,7 +585,6 @@@
  		    ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
  		    ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
  		   ) {
 -
  			port->ntt = true;
  		}
  	}
@@@ -708,13 -702,9 +708,13 @@@ static struct aggregator *__get_active_
  	struct list_head *iter;
  	struct slave *slave;
  
 -	bond_for_each_slave(bond, slave, iter)
 -		if (SLAVE_AD_INFO(slave).aggregator.is_active)
 +	rcu_read_lock();
 +	bond_for_each_slave_rcu(bond, slave, iter)
 +		if (SLAVE_AD_INFO(slave).aggregator.is_active) {
 +			rcu_read_unlock();
  			return &(SLAVE_AD_INFO(slave).aggregator);
 +		}
 +	rcu_read_unlock();
  
  	return NULL;
  }
@@@ -1081,8 -1071,9 +1081,8 @@@ static void ad_rx_machine(struct lacpd
  			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
  			break;
  		case AD_RX_CURRENT:
 -			// detect loopback situation
 -			if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
 -				// INFO_RECEIVED_LOOPBACK_FRAMES
 +			/* detect loopback situation */
 +			if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), &(port->actor_system))) {
  				pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
  				       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
  				       port->slave->bond->dev->name, port->slave->dev->name);
@@@ -1094,7 -1085,7 +1094,7 @@@
  			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
  			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
  			break;
 -		default:    //to silence the compiler
 +		default:    /* to silence the compiler */
  			break;
  		}
  	}
@@@ -1285,17 -1276,17 +1285,17 @@@ static void ad_port_selection_logic(str
  				free_aggregator = aggregator;
  			continue;
  		}
 -		// check if current aggregator suits us
 -		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
 -		     !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
 +		/* check if current aggregator suits us */
 +		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
 +		     MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
  		     (aggregator->partner_system_priority == port->partner_oper.system_priority) &&
  		     (aggregator->partner_oper_aggregator_key == port->partner_oper.key)
  		    ) &&
 -		    ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
 -		      !aggregator->is_individual)  // but is not individual OR
 +		    ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
 +		      !aggregator->is_individual)  /* but is not individual OR */
  		    )
  		   ) {
 -			// attach to the founded aggregator
 +			/* attach to the aggregator we found */
  			port->aggregator = aggregator;
  			port->actor_port_aggregator_identifier =
  				port->aggregator->aggregator_identifier;
@@@ -1480,8 -1471,7 +1480,8 @@@ static void ad_agg_selection_logic(stru
  	active = __get_active_agg(agg);
  	best = (active && agg_device_up(active)) ? active : NULL;
  
 -	bond_for_each_slave(bond, slave, iter) {
 +	rcu_read_lock();
 +	bond_for_each_slave_rcu(bond, slave, iter) {
  		agg = &(SLAVE_AD_INFO(slave).aggregator);
  
  		agg->is_active = 0;
@@@ -1515,7 -1505,7 +1515,7 @@@
  		active->is_active = 1;
  	}
  
 -	// if there is new best aggregator, activate it
 +	/* if there is new best aggregator, activate it */
  	if (best) {
  		pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
  			 best->aggregator_identifier, best->num_of_ports,
@@@ -1526,7 -1516,7 +1526,7 @@@
  			 best->lag_ports, best->slave,
  			 best->slave ? best->slave->dev->name : "NULL");
  
 -		bond_for_each_slave(bond, slave, iter) {
 +		bond_for_each_slave_rcu(bond, slave, iter) {
  			agg = &(SLAVE_AD_INFO(slave).aggregator);
  
  			pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@@ -1536,11 -1526,10 +1536,11 @@@
  				 agg->is_individual, agg->is_active);
  		}
  
 -		// check if any partner replys
 +		/* check if any partner replies */
  		if (best->is_individual) {
  			pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
 -				   best->slave ? best->slave->bond->dev->name : "NULL");
 +				best->slave ?
 +				best->slave->bond->dev->name : "NULL");
  		}
  
  		best->is_active = 1;
@@@ -1552,7 -1541,7 +1552,7 @@@
  			 best->partner_oper_aggregator_key,
  			 best->is_individual, best->is_active);
  
 -		// disable the ports that were related to the former active_aggregator
 +		/* disable the ports that were related to the former active_aggregator */
  		if (active) {
  			for (port = active->lag_ports; port;
  			     port = port->next_port_in_aggregator) {
@@@ -1576,8 -1565,6 +1576,8 @@@
  		}
  	}
  
 +	rcu_read_unlock();
 +
  	bond_3ad_set_carrier(bond);
  }
  
@@@ -1709,7 -1696,7 +1709,7 @@@ static void ad_enable_collecting_distri
   */
  static void ad_disable_collecting_distributing(struct port *port)
  {
 -	if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
 +	if (port->aggregator && !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system), &(null_mac_addr))) {
  		pr_debug("Disabling port %d(LAG %d)\n",
  			 port->actor_port_number,
  			 port->aggregator->aggregator_identifier);
@@@ -1830,8 -1817,8 +1830,8 @@@ static u16 aggregator_identifier
   */
  void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
  {
 -	// check that the bond is not initialized yet
 -	if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
 +	/* check that the bond is not initialized yet */
 +	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
  				bond->dev->dev_addr)) {
  
  		aggregator_identifier = 0;
@@@ -1855,16 -1842,22 +1855,16 @@@
   * Returns:   0 on success
   *          < 0 on error
   */
 -int bond_3ad_bind_slave(struct slave *slave)
 +void bond_3ad_bind_slave(struct slave *slave)
  {
  	struct bonding *bond = bond_get_bond_by_slave(slave);
  	struct port *port;
  	struct aggregator *aggregator;
  
 -	if (bond == NULL) {
 -		pr_err("%s: The slave %s is not attached to its bond\n",
 -		       slave->bond->dev->name, slave->dev->name);
 -		return -1;
 -	}
 -
 -	//check that the slave has not been initialized yet.
 +	/* check that the slave has not been initialized yet. */
  	if (SLAVE_AD_INFO(slave).port.slave != slave) {
  
 -		// port initialization
 +		/* port initialization */
  		port = &(SLAVE_AD_INFO(slave).port);
  
  		ad_initialize_port(port, bond->params.lacp_fast);
@@@ -1872,30 -1865,28 +1872,30 @@@
  		__initialize_port_locks(slave);
  		port->slave = slave;
  		port->actor_port_number = SLAVE_AD_INFO(slave).id;
 -		// key is determined according to the link speed, duplex and user key(which is yet not supported)
 -		//              ------------------------------------------------------------
 -		// Port key :   | User key                       |      Speed       |Duplex|
 -		//              ------------------------------------------------------------
 -		//              16                               6               1 0
 -		port->actor_admin_port_key = 0;	// initialize this parameter
 +		/* key is determined according to the link speed, duplex and user key
 +		 * (which is not yet supported)
 +		 *              ------------------------------------------------------------
 +		 * Port key :   | User key                       |      Speed       |Duplex|
 +		 *              ------------------------------------------------------------
 +		 *              16                               6               1 0
 +		 */
 +		port->actor_admin_port_key = 0;	/* initialize this parameter */
  		port->actor_admin_port_key |= __get_duplex(port);
  		port->actor_admin_port_key |= (__get_link_speed(port) << 1);
  		port->actor_oper_port_key = port->actor_admin_port_key;
 -		// if the port is not full duplex, then the port should be not lacp Enabled
 +		/* if the port is not full duplex, then the port should not be LACP enabled */
  		if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
  			port->sm_vars &= ~AD_PORT_LACP_ENABLED;
 -		// actor system is the bond's system
 +		/* actor system is the bond's system */
  		port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
 -		// tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
 +		/* tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) */
  		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
  		port->aggregator = NULL;
  		port->next_port_in_aggregator = NULL;
  
  		__disable_port(port);
  
 -		// aggregator initialization
 +		/* aggregator initialization */
  		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
  
  		ad_initialize_agg(aggregator);
@@@ -1906,6 -1897,8 +1906,6 @@@
  		aggregator->is_active = 0;
  		aggregator->num_of_ports = 0;
  	}
 -
 -	return 0;
  }
  
  /**
@@@ -2076,18 -2069,17 +2076,18 @@@ void bond_3ad_state_machine_handler(str
  	struct port *port;
  
  	read_lock(&bond->lock);
 +	rcu_read_lock();
  
 -	//check if there are any slaves
 +	/* check if there are any slaves */
  	if (!bond_has_slaves(bond))
  		goto re_arm;
  
 -	// check if agg_select_timer timer after initialize is timed out
 +	/* check if agg_select_timer timer after initialize is timed out */
  	if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
 -		slave = bond_first_slave(bond);
 +		slave = bond_first_slave_rcu(bond);
  		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
  
 -		// select the active aggregator for the bond
 +		/* select the active aggregator for the bond */
  		if (port) {
  			if (!port->slave) {
  				pr_warning("%s: Warning: bond's first port is uninitialized\n",
@@@ -2101,8 -2093,8 +2101,8 @@@
  		bond_3ad_set_carrier(bond);
  	}
  
 -	// for each port run the state machines
 -	bond_for_each_slave(bond, slave, iter) {
 +	/* for each port run the state machines */
 +	bond_for_each_slave_rcu(bond, slave, iter) {
  		port = &(SLAVE_AD_INFO(slave).port);
  		if (!port->slave) {
  			pr_warning("%s: Warning: Found an uninitialized port\n",
@@@ -2122,7 -2114,7 +2122,7 @@@
  		ad_mux_machine(port);
  		ad_tx_machine(port);
  
 -		// turn off the BEGIN bit, since we already handled it
 +		/* turn off the BEGIN bit, since we already handled it */
  		if (port->sm_vars & AD_PORT_BEGIN)
  			port->sm_vars &= ~AD_PORT_BEGIN;
  
@@@ -2130,9 -2122,9 +2130,9 @@@
  	}
  
  re_arm:
 -	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 -
 +	rcu_read_unlock();
  	read_unlock(&bond->lock);
 +	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
  }
  
  /**
@@@ -2209,20 -2201,25 +2209,25 @@@ void bond_3ad_adapter_speed_changed(str
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
- 	// if slave is null, the whole port is not initialized
+ 	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
+ 	__get_state_machine_lock(port);
+ 
  	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
  	port->actor_oper_port_key = port->actor_admin_port_key |=
  		(__get_link_speed(port) << 1);
  	pr_debug("Port %d changed speed\n", port->actor_port_number);
- 	// there is no need to reselect a new aggregator, just signal the
- 	// state machines to reinitialize
+ 	/* there is no need to reselect a new aggregator, just signal the
+ 	 * state machines to reinitialize
+ 	 */
  	port->sm_vars |= AD_PORT_BEGIN;
+ 
+ 	__release_state_machine_lock(port);
  }
  
  /**
@@@ -2237,20 -2234,25 +2242,25 @@@ void bond_3ad_adapter_duplex_changed(st
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
- 	// if slave is null, the whole port is not initialized
+ 	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
+ 	__get_state_machine_lock(port);
+ 
  	port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
  	port->actor_oper_port_key = port->actor_admin_port_key |=
  		__get_duplex(port);
  	pr_debug("Port %d changed duplex\n", port->actor_port_number);
- 	// there is no need to reselect a new aggregator, just signal the
- 	// state machines to reinitialize
+ 	/* there is no need to reselect a new aggregator, just signal the
+ 	 * state machines to reinitialize
+ 	 */
  	port->sm_vars |= AD_PORT_BEGIN;
+ 
+ 	__release_state_machine_lock(port);
  }
  
  /**
@@@ -2266,15 -2268,21 +2276,21 @@@ void bond_3ad_handle_link_change(struc
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
- 	// if slave is null, the whole port is not initialized
+ 	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
- 	// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
- 	// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+ 	__get_state_machine_lock(port);
+ 	/* on link down we are zeroing duplex and speed since
+ 	 * some of the adaptors(ce1000.lan) report full duplex/speed
+ 	 * instead of N/A(duplex) / 0(speed).
+ 	 *
+ 	 * on link up we are forcing a recheck of the duplex and speed since
+ 	 * some of the adaptors (ce1000.lan) report them incorrectly.
+ 	 */
  	if (link == BOND_LINK_UP) {
  		port->is_enabled = true;
  		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@@ -2290,10 -2298,15 +2306,15 @@@
  		port->actor_oper_port_key = (port->actor_admin_port_key &=
  					     ~AD_SPEED_KEY_BITS);
  	}
- 	//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
- 	// there is no need to reselect a new aggregator, just signal the
- 	// state machines to reinitialize
+ 	pr_debug("Port %d changed link status to %s",
+ 		port->actor_port_number,
+ 		(link == BOND_LINK_UP) ? "UP" : "DOWN");
+ 	/* there is no need to reselect a new aggregator, just signal the
+ 	 * state machines to reinitialize
+ 	 */
  	port->sm_vars |= AD_PORT_BEGIN;
+ 
+ 	__release_state_machine_lock(port);
  }
  
  /*
@@@ -2311,9 -2324,7 +2332,9 @@@ int bond_3ad_set_carrier(struct bondin
  	struct aggregator *active;
  	struct slave *first_slave;
  
 -	first_slave = bond_first_slave(bond);
 +	rcu_read_lock();
 +	first_slave = bond_first_slave_rcu(bond);
 +	rcu_read_unlock();
  	if (!first_slave)
  		return 0;
  	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
@@@ -2395,12 -2406,13 +2416,12 @@@ int bond_3ad_xmit_xor(struct sk_buff *s
  	struct list_head *iter;
  	int slaves_in_agg;
  	int slave_agg_no;
 -	int res = 1;
  	int agg_id;
  
  	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
  		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
  			 dev->name);
 -		goto out;
 +		goto err_free;
  	}
  
  	slaves_in_agg = ad_info.ports;
@@@ -2408,7 -2420,7 +2429,7 @@@
  
  	if (slaves_in_agg == 0) {
  		pr_debug("%s: Error: active aggregator is empty\n", dev->name);
 -		goto out;
 +		goto err_free;
  	}
  
  	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@@ -2427,7 -2439,7 +2448,7 @@@
  		}
  
  		if (SLAVE_IS_OK(slave)) {
 -			res = bond_dev_queue_xmit(bond, skb, slave->dev);
 +			bond_dev_queue_xmit(bond, skb, slave->dev);
  			goto out;
  		}
  	}
@@@ -2435,22 -2447,21 +2456,22 @@@
  	if (slave_agg_no >= 0) {
  		pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
  		       dev->name, agg_id);
 -		goto out;
 +		goto err_free;
  	}
  
  	/* we couldn't find any suitable slave after the agg_no, so use the
  	 * first suitable one found, if any. */
  	if (first_ok_slave)
 -		res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 +		bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 +	else
 +		goto err_free;
  
  out:
 -	if (res) {
 -		/* no suitable interface, frame not sent */
 -		kfree_skb(skb);
 -	}
 -
  	return NETDEV_TX_OK;
 +err_free:
 +	/* no suitable interface, frame not sent */
 +	kfree_skb(skb);
 +	goto out;
  }
  
  int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
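
A note on the MAC_ADDRESS_COMPARE -> MAC_ADDRESS_EQUAL conversion running
through this file: memcmp() returns 0 on a match while
ether_addr_equal_64bits() returns true, so the sense of every call site flips
(!MAC_ADDRESS_COMPARE(a, b) becomes MAC_ADDRESS_EQUAL(a, b) and vice versa).
A userspace approximation for sanity-checking the rewrite (the kernel's
_64bits variant additionally requires two readable padding bytes after each
address, which the wrapped structures provide):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* equality helper with the same truth value as ether_addr_equal_64bits() */
static bool mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;	/* equal <=> memcmp == 0 */
}

int main(void)
{
	unsigned char x[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char y[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* old style !MAC_ADDRESS_COMPARE(x, y) == new style MAC_ADDRESS_EQUAL(x, y) */
	printf("equal: %d\n", mac_equal(x, y));	/* prints "equal: 1" */
	return 0;
}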
diff --combined drivers/net/ethernet/arc/emac_main.c
index eedf2a5,248baf6..eeecc29
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@@ -381,7 -381,17 +381,7 @@@ static int arc_emac_open(struct net_dev
  	phy_dev->autoneg = AUTONEG_ENABLE;
  	phy_dev->speed = 0;
  	phy_dev->duplex = 0;
 -	phy_dev->advertising = phy_dev->supported;
 -
 -	if (priv->max_speed > 100) {
 -		phy_dev->advertising &= PHY_GBIT_FEATURES;
 -	} else if (priv->max_speed <= 100) {
 -		phy_dev->advertising &= PHY_BASIC_FEATURES;
 -		if (priv->max_speed <= 10) {
 -			phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
 -			phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
 -		}
 -	}
 +	phy_dev->advertising &= phy_dev->supported;
  
  	priv->last_rx_bd = 0;
  
@@@ -555,6 -565,8 +555,8 @@@ static int arc_emac_tx(struct sk_buff *
  	/* Make sure pointer to data buffer is set */
  	wmb();
  
+ 	skb_tx_timestamp(skb);
+ 
  	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
  
  	/* Increment index to point to the next BD */
@@@ -569,8 -581,6 +571,6 @@@
  
  	arc_reg_set(priv, R_STATUS, TXPL_MASK);
  
- 	skb_tx_timestamp(skb);
- 
  	return NETDEV_TX_OK;
  }
  
@@@ -694,6 -704,14 +694,6 @@@ static int arc_emac_probe(struct platfo
  	/* Set poll rate so that it polls every 1 ms */
  	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
  
 -	/* Get max speed of operation from device tree */
 -	if (of_property_read_u32(pdev->dev.of_node, "max-speed",
 -				 &priv->max_speed)) {
 -		dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
 -		err = -EINVAL;
 -		goto out;
 -	}
 -
  	ndev->irq = irq;
  	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
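
The emac TX hunk above moves skb_tx_timestamp() before the write that marks
the descriptor FOR_EMAC: once the hardware owns the buffer, TX completion can
free the skb concurrently, so timestamping afterwards is a use-after-free
window. A generalized sketch of the safe ordering (the ring layout and
OWN_BIT are hypothetical, for illustration only):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define OWN_BIT BIT(31)			/* hypothetical "hardware owns this" flag */

struct example_ring {			/* hypothetical TX ring */
	struct { __le32 info; } *desc;
	unsigned int head;
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct example_ring *ring)
{
	/* ... map the buffer and fill all other descriptor fields first ... */

	skb_tx_timestamp(skb);	/* last access to skb before handing it to HW */

	wmb();			/* descriptor contents visible before ownership flips */
	ring->desc[ring->head].info = cpu_to_le32(OWN_BIT | skb->len);

	return NETDEV_TX_OK;
}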
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dad6790,2d5fce4..eb105ab
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@@ -472,7 -472,7 +472,7 @@@ struct bnx2x_agg_info 
  	u16			vlan_tag;
  	u16			len_on_bd;
  	u32			rxhash;
 -	bool			l4_rxhash;
 +	enum pkt_hash_types	rxhash_type;
  	u16			gro_size;
  	u16			full_page;
  };
@@@ -1250,7 -1250,10 +1250,10 @@@ struct bnx2x_slowpath 
  	 * Therefore, if they would have been defined in the same union,
  	 * data can get corrupted.
  	 */
- 	struct afex_vif_list_ramrod_data func_afex_rdata;
+ 	union {
+ 		struct afex_vif_list_ramrod_data	viflist_data;
+ 		struct function_update_data		func_update;
+ 	} func_afex_rdata;
  
  	/* used by dmae command executer */
  	struct dmae_command		dmae[MAX_DMAE_C];
@@@ -1546,7 -1549,6 +1549,7 @@@ struct bnx2x 
  #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
  #define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
  #define HAS_PHYS_PORT_ID		(1 << 25)
 +#define AER_ENABLED			(1 << 26)
  
  #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
  
@@@ -2437,8 -2439,7 +2440,8 @@@ void bnx2x_igu_clear_sb_gen(struct bnx2
  
  #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
  			    (!((me_reg) & ME_REG_VF_ERR)))
 -int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
 +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
 +
  /* Congestion management fairness mode */
  #define CMNG_FNS_NONE			0
  #define CMNG_FNS_MINMAX			1
@@@ -2501,4 -2502,6 +2504,6 @@@ void bnx2x_set_local_cmng(struct bnx2x 
  #define MCPR_SCRATCH_BASE(bp) \
  	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
  
+ #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+ 
  #endif /* bnx2x.h */
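
On the func_afex_rdata change above: the two AFEX ramrods are triggered by
the same flow and are presumably never in flight at the same time, so turning
the field into a union lets them share one slowpath scratch buffer, sized to
the larger layout, while staying out of the union that the preceding comment
warns about. The sizing mechanics, with stand-in layouts (the real structs
come from the firmware HSI headers):

#include <stdio.h>

struct afex_vif_list_ramrod_data { unsigned char raw[24]; };	/* stand-in */
struct function_update_data      { unsigned char raw[56]; };	/* stand-in */

union func_afex_rdata {
	struct afex_vif_list_ramrod_data	viflist_data;
	struct function_update_data		func_update;
};

int main(void)
{
	/* a union reserves max(sizeof(member)) bytes, 56 here */
	printf("%zu\n", sizeof(union func_afex_rdata));
	return 0;
}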
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d3748bf,8b3107b..18498fe
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@@ -27,7 -27,6 +27,7 @@@
  #include <linux/slab.h>
  #include <linux/interrupt.h>
  #include <linux/pci.h>
 +#include <linux/aer.h>
  #include <linux/init.h>
  #include <linux/netdevice.h>
  #include <linux/etherdevice.h>
@@@ -3298,10 -3297,6 +3298,10 @@@ static void bnx2x_drv_info_ether_stat(s
  
  	ether_stat->txq_size = bp->tx_ring_size;
  	ether_stat->rxq_size = bp->rx_ring_size;
 +
 +#ifdef CONFIG_BNX2X_SRIOV
 +	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
 +#endif
  }
  
  static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@@ -9859,64 -9854,6 +9859,64 @@@ static void bnx2x_prev_unload_close_mac
  #define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
  #define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))
  
 +#define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
 +#define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
 +#define BCM_5710_UNDI_FW_MF_VERS	(0x05)
 +#define BNX2X_PREV_UNDI_MF_PORT(p)	(0x1a150c + ((p) << 4))
 +#define BNX2X_PREV_UNDI_MF_FUNC(f)	(0x1a184c + ((f) << 4))
 +static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 +{
 +	u8 major, minor, version;
 +	u32 fw;
 +
 +	/* Must check that FW is loaded */
 +	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
 +	     MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
 +		BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
 +		return false;
 +	}
 +
 +	/* Read Currently loaded FW version */
 +	fw = REG_RD(bp, XSEM_REG_PRAM);
 +	major = fw & 0xff;
 +	minor = (fw >> 0x8) & 0xff;
 +	version = (fw >> 0x10) & 0xff;
 +	BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
 +		       fw, major, minor, version);
 +
 +	if (major > BCM_5710_UNDI_FW_MF_MAJOR)
 +		return true;
 +
 +	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
 +	    (minor > BCM_5710_UNDI_FW_MF_MINOR))
 +		return true;
 +
 +	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
 +	    (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
 +	    (version >= BCM_5710_UNDI_FW_MF_VERS))
 +		return true;
 +
 +	return false;
 +}
 +
 +static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
 +{
 +	int i;
 +
 +	/* Due to legacy (FW) code, the first function on each engine has a
 +	 * different offset macro from the rest of the functions.
 +	 * Setting this for all 8 functions is harmless regardless of whether
 +	 * this is actually a multi-function device.
 +	 */
 +	for (i = 0; i < 2; i++)
 +		REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
 +
 +	for (i = 2; i < 8; i++)
 +		REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
 +
 +	BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
 +}
 +
  static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
  {
  	u16 rcq, bd;
@@@ -10117,7 -10054,7 +10117,7 @@@ static int bnx2x_prev_unload_uncommon(s
  	 * the one required, then FLR will be sufficient to clean any residue
  	 * left by previous driver
  	 */
 -	rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
 +	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
  
  	if (!rc) {
  		/* fw version is good */
@@@ -10205,17 -10142,10 +10205,17 @@@ static int bnx2x_prev_unload_common(str
  			else
  				timer_count--;
  
 -			/* If UNDI resides in memory, manually increment it */
 -			if (prev_undi)
 +			/* New UNDI FW supports MF and contains better
 +			 * cleaning methods - might be redundant but harmless.
 +			 */
 +			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
 +				bnx2x_prev_unload_undi_mf(bp);
 +			} else if (prev_undi) {
 +				/* If UNDI resides in memory,
 +				 * manually increment it
 +				 */
  				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
 -
 +			}
  			udelay(10);
  		}
  
@@@ -10335,8 -10265,8 +10335,8 @@@ static int bnx2x_prev_unload(struct bnx
  	} while (--time_counter);
  
  	if (!time_counter || rc) {
 -		BNX2X_ERR("Failed unloading previous driver, aborting\n");
 -		rc = -EBUSY;
 +		BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
 +		rc = -EPROBE_DEFER;
  	}
  
  	/* Mark function if its port was used to boot from SAN */
@@@ -11517,9 -11447,9 +11517,9 @@@ static int bnx2x_get_hwinfo(struct bnx2
  		}
  	}
  
- 	/* adjust igu_sb_cnt to MF for E1x */
- 	if (CHIP_IS_E1x(bp) && IS_MF(bp))
- 		bp->igu_sb_cnt /= E1HVN_MAX;
+ 	/* adjust igu_sb_cnt to MF for E1H */
+ 	if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ 		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
  
  	/* port info */
  	bnx2x_get_port_hwinfo(bp);
@@@ -11706,11 -11636,7 +11706,11 @@@ static int bnx2x_init_bp(struct bnx2x *
  							DRV_MSG_SEQ_NUMBER_MASK;
  		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
  
 -		bnx2x_prev_unload(bp);
 +		rc = bnx2x_prev_unload(bp);
 +		if (rc) {
 +			bnx2x_free_mem_bp(bp);
 +			return rc;
 +		}
  	}
  
  	if (CHIP_REV_IS_FPGA(bp))
@@@ -12230,14 -12156,6 +12230,14 @@@ static int bnx2x_set_coherency_mask(str
  	return 0;
  }
  
 +static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
 +{
 +	if (bp->flags & AER_ENABLED) {
 +		pci_disable_pcie_error_reporting(bp->pdev);
 +		bp->flags &= ~AER_ENABLED;
 +	}
 +}
 +
  static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
  			  struct net_device *dev, unsigned long board_type)
  {
@@@ -12344,14 -12262,6 +12344,14 @@@
  	/* clean indirect addresses */
  	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
  			       PCICFG_VENDOR_ID_OFFSET);
 +
 +	/* AER (Advanced Error reporting) configuration */
 +	rc = pci_enable_pcie_error_reporting(pdev);
 +	if (!rc)
 +		bp->flags |= AER_ENABLED;
 +	else
 +		BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
 +
  	/*
  	 * Clean the following indirect addresses for all functions since it
  	 * is not used by the driver.
@@@ -12959,8 -12869,6 +12959,8 @@@ static int bnx2x_init_one(struct pci_de
  	return 0;
  
  init_one_exit:
 +	bnx2x_disable_pcie_error_reporting(bp);
 +
  	if (bp->regview)
  		iounmap(bp->regview);
  
@@@ -13034,8 -12942,6 +13034,8 @@@ static void __bnx2x_remove(struct pci_d
  		pci_set_power_state(pdev, PCI_D3hot);
  	}
  
 +	bnx2x_disable_pcie_error_reporting(bp);
 +
  	if (bp->regview)
  		iounmap(bp->regview);
  
@@@ -13213,14 -13119,6 +13213,14 @@@ static pci_ers_result_t bnx2x_io_slot_r
  
  	rtnl_unlock();
  
 +	/* If AER, perform cleanup of the PCIe registers */
 +	if (bp->flags & AER_ENABLED) {
 +		if (pci_cleanup_aer_uncorrect_error_status(pdev))
 +			BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
 +		else
 +			DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
 +	}
 +
  	return PCI_ERS_RESULT_RECOVERED;
  }
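
The AER changes in this file follow one small pattern: try to enable PCIe
Advanced Error Reporting at init, remember whether that succeeded in a flag,
and use the flag to keep teardown and the io_slot_reset cleanup balanced. A
sketch assuming the 3.13-era AER API (these pci_* calls exist in that kernel;
struct example_bp stands in for struct bnx2x):

#include <linux/aer.h>
#include <linux/pci.h>

#define EXAMPLE_AER_ENABLED	(1 << 26)	/* mirrors the new bp->flags bit */

struct example_bp {
	struct pci_dev *pdev;
	u32 flags;
};

static void example_enable_aer(struct example_bp *bp)
{
	int rc = pci_enable_pcie_error_reporting(bp->pdev);

	if (!rc)
		bp->flags |= EXAMPLE_AER_ENABLED;	/* remember for teardown */
}

static void example_disable_aer(struct example_bp *bp)
{
	if (bp->flags & EXAMPLE_AER_ENABLED) {
		pci_disable_pcie_error_reporting(bp->pdev);
		bp->flags &= ~EXAMPLE_AER_ENABLED;
	}
}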
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 08f8047,14ffb6e..2beb543
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@@ -5932,7 -5932,6 +5932,7 @@@
  #define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
  #define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)
  #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
 +#define MISC_REGISTERS_RESET_REG_1_RST_XSEM			 (0x1<<22)
  #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
  #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
  #define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
@@@ -7180,6 -7179,7 +7180,7 @@@ Theotherbitsarereservedandshouldbezero*
  #define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
  #define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
  #define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+ #define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa
  #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
  #define MDIO_WC_REG_XGXS_STATUS3			0x8129
  #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index babf7b9,18438a5..98cccd4
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@@ -663,7 -663,7 +663,7 @@@ static int bnx2x_check_mac_add(struct b
  
  	/* Check if a requested MAC already exists */
  	list_for_each_entry(pos, &o->head, link)
 -		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
 +		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
  		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
  			return -EEXIST;
  
@@@ -696,7 -696,8 +696,7 @@@ static int bnx2x_check_vlan_mac_add(str
  
  	list_for_each_entry(pos, &o->head, link)
  		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
 -		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
 -				  ETH_ALEN)) &&
 +		    ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) &&
  		    (data->vlan_mac.is_inner_mac ==
  		     pos->u.vlan_mac.is_inner_mac))
  			return -EEXIST;
@@@ -715,7 -716,7 +715,7 @@@ static struct bnx2x_vlan_mac_registry_e
  	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
  
  	list_for_each_entry(pos, &o->head, link)
 -		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
 +		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
  		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
  			return pos;
  
@@@ -750,7 -751,8 +750,7 @@@ static struct bnx2x_vlan_mac_registry_e
  
  	list_for_each_entry(pos, &o->head, link)
  		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
 -		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
 -			     ETH_ALEN)) &&
 +		    ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) &&
  		    (data->vlan_mac.is_inner_mac ==
  		     pos->u.vlan_mac.is_inner_mac))
  			return pos;
@@@ -2036,6 -2038,7 +2036,7 @@@ static int bnx2x_vlan_mac_del_all(struc
  	struct bnx2x_vlan_mac_ramrod_params p;
  	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
  	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ 	unsigned long flags;
  	int read_lock;
  	int rc = 0;
  
@@@ -2044,8 -2047,9 +2045,9 @@@
  	spin_lock_bh(&exeq->lock);
  
  	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- 		    *vlan_mac_flags) {
+ 		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ 		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ 		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
  			rc = exeq->remove(bp, exeq->owner, exeq_pos);
  			if (rc) {
  				BNX2X_ERR("Failed to remove command\n");
@@@ -2078,7 -2082,9 +2080,9 @@@
  		return read_lock;
  
  	list_for_each_entry(pos, &o->head, link) {
- 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ 		flags = pos->vlan_mac_flags;
+ 		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ 		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
  			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
  			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
  			rc = bnx2x_config_vlan_mac(bp, &p);
@@@ -4380,8 -4386,11 +4384,11 @@@ int bnx2x_config_rss(struct bnx2x *bp
  	struct bnx2x_raw_obj *r = &o->raw;
  
  	/* Do nothing if only driver cleanup was requested */
- 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ 		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ 		   p->ramrod_flags);
  		return 0;
+ 	}
  
  	r->set_pending(r);
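
Worth noting on the conversions in this file: the plain MAC field uses
ether_addr_equal() while the MAC embedded in the vlan_mac pair uses
ether_addr_equal_unaligned(), presumably because the kernel's fast helpers
assume at least u16 alignment and the embedded layout cannot guarantee it.
Both have the same semantics; a userspace approximation:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

struct vlan_mac_like {			/* stand-in for the bnx2x layout */
	unsigned short vlan;
	unsigned char  mac[ETH_ALEN];	/* may sit at any alignment in packed data */
};

/* both kernel helpers reduce to this comparison; they differ only in the
 * alignment (and padding) assumptions they exploit to speed it up
 */
static bool mac6_equal(const void *a, const void *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	struct vlan_mac_like a = { 100, { 0, 1, 2, 3, 4, 5 } };
	struct vlan_mac_like b = { 200, { 0, 1, 2, 3, 4, 5 } };

	return mac6_equal(a.mac, b.mac) ? 0 : 1;	/* same MAC, different vlan */
}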
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fe52d3,e7845e5..31ab924
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@@ -166,7 -166,6 +166,7 @@@ enum bnx2x_vfop_qteardown_state 
  	   BNX2X_VFOP_QTEARDOWN_RXMODE,
  	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
  	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
 +	   BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
  	   BNX2X_VFOP_QTEARDOWN_QDTOR,
  	   BNX2X_VFOP_QTEARDOWN_DONE
  };
@@@ -1113,10 -1112,7 +1113,10 @@@ static void bnx2x_vfop_mcast(struct bnx
  	switch (state) {
  	case BNX2X_VFOP_MCAST_DEL:
  		/* clear existing mcasts */
 -		vfop->state = BNX2X_VFOP_MCAST_ADD;
 +		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
 +					     : BNX2X_VFOP_MCAST_CHK_DONE;
 +		mcast->mcast_list_len = vf->mcast_list_len;
 +		vf->mcast_list_len = args->mc_num;
  		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
  		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
  
@@@ -1124,17 -1120,17 +1124,17 @@@
  		if (raw->check_pending(raw))
  			goto op_pending;
  
 -		if (args->mc_num) {
 -			/* update mcast list on the ramrod params */
 -			INIT_LIST_HEAD(&mcast->mcast_list);
 -			for (i = 0; i < args->mc_num; i++)
 -				list_add_tail(&(args->mc[i].link),
 -					      &mcast->mcast_list);
 -			/* add new mcasts */
 -			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
 -			vfop->rc = bnx2x_config_mcast(bp, mcast,
 -						      BNX2X_MCAST_CMD_ADD);
 -		}
 +		/* update mcast list on the ramrod params */
 +		INIT_LIST_HEAD(&mcast->mcast_list);
 +		for (i = 0; i < args->mc_num; i++)
 +			list_add_tail(&(args->mc[i].link),
 +				      &mcast->mcast_list);
 +		mcast->mcast_list_len = args->mc_num;
 +
 +		/* add new mcasts */
 +		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
 +		vfop->rc = bnx2x_config_mcast(bp, mcast,
 +					      BNX2X_MCAST_CMD_ADD);
  		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
  
  	case BNX2X_VFOP_MCAST_CHK_DONE:
@@@ -1213,6 -1209,11 +1213,11 @@@ static void bnx2x_vfop_rxmode(struct bn
  		/* next state */
  		vfop->state = BNX2X_VFOP_RXMODE_DONE;
  
+ 		/* record the accept flags in vfdb so the hypervisor can modify them
+ 		 * if necessary
+ 		 */
+ 		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+ 			ramrod->rx_accept_flags;
  		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
  		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
  op_err:
@@@ -1228,39 -1229,43 +1233,43 @@@ op_pending
  	return;
  }
  
+ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ 				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+ 				  struct bnx2x_virtf *vf,
+ 				  unsigned long accept_flags)
+ {
+ 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+ 
+ 	memset(ramrod, 0, sizeof(*ramrod));
+ 	ramrod->cid = vfq->cid;
+ 	ramrod->cl_id = vfq_cl_id(vf, vfq);
+ 	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ 	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ 	ramrod->rx_accept_flags = accept_flags;
+ 	ramrod->tx_accept_flags = accept_flags;
+ 	ramrod->pstate = &vf->filter_state;
+ 	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+ 
+ 	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ 	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ 	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+ 
+ 	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ }
+ 
  int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
  			  struct bnx2x_virtf *vf,
  			  struct bnx2x_vfop_cmd *cmd,
  			  int qid, unsigned long accept_flags)
  {
- 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
  	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
  
  	if (vfop) {
  		struct bnx2x_rx_mode_ramrod_params *ramrod =
  			&vf->op_params.rx_mode;
  
- 		memset(ramrod, 0, sizeof(*ramrod));
- 
- 		/* Prepare ramrod parameters */
- 		ramrod->cid = vfq->cid;
- 		ramrod->cl_id = vfq_cl_id(vf, vfq);
- 		ramrod->rx_mode_obj = &bp->rx_mode_obj;
- 		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
- 
- 		ramrod->rx_accept_flags = accept_flags;
- 		ramrod->tx_accept_flags = accept_flags;
- 		ramrod->pstate = &vf->filter_state;
- 		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
- 
- 		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- 		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- 		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
- 
- 		ramrod->rdata =
- 			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- 		ramrod->rdata_mapping =
- 			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ 		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
  
  		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
  				 bnx2x_vfop_rxmode, cmd->done);
@@@ -1307,19 -1312,12 +1316,19 @@@ static void bnx2x_vfop_qdown(struct bnx
  
  	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
  		/* mac-clear-all: consume credit */
 -		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
 +		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
  		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
  		if (vfop->rc)
  			goto op_err;
  		return;
  
 +	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
 +		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
 +		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
 +		if (vfop->rc)
 +			goto op_err;
 +		return;
 +
  	case BNX2X_VFOP_QTEARDOWN_QDTOR:
  		/* run the queue destruction flow */
  		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@@ -2199,7 -2197,6 +2208,7 @@@ int bnx2x_iov_nic_init(struct bnx2x *bp
  		 *  It needs to be initialized here so that it can be safely
  		 *  handled by a subsequent FLR flow.
  		 */
 +		vf->mcast_list_len = 0;
  		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
  				     0xFF, 0xFF, 0xFF,
  				     bnx2x_vf_sp(bp, vf, mcast_rdata),
@@@ -2860,9 -2857,13 +2869,9 @@@ static void bnx2x_vfop_close(struct bnx
  				goto op_err;
  			return;
  		}
 -
 -		/* remove multicasts */
  		vfop->state = BNX2X_VFOP_CLOSE_HW;
 -		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
 -		if (vfop->rc)
 -			goto op_err;
 -		return;
 +		vfop->rc = 0;
 +		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
  
  	case BNX2X_VFOP_CLOSE_HW:
  
@@@ -2896,9 -2897,6 +2905,9 @@@ op_done
  
  	DP(BNX2X_MSG_IOV, "set state to acquired\n");
  	bnx2x_vfop_end(bp, vf, vfop);
 +op_pending:
 +	/* Not supported at the moment; exists for macros only */
 +	return;
  }
  
  int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@@ -3213,13 -3211,16 +3222,16 @@@ int bnx2x_enable_sriov(struct bnx2x *bp
  		bnx2x_iov_static_resc(bp, vf);
  	}
  
- 	/* prepare msix vectors in VF configuration space */
+ 	/* prepare msix vectors in VF configuration space - the value in the
+ 	 * PCI configuration space should be the index of the last entry,
+ 	 * namely one less than the actual size of the table
+ 	 */
  	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
  		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
  		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
- 		       num_vf_queues);
+ 		       num_vf_queues - 1);
  		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
- 		   vf_idx, num_vf_queues);
+ 		   vf_idx, num_vf_queues - 1);
  	}
  	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
  
@@@ -3447,10 -3448,18 +3459,18 @@@ out
  
  int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
  {
+ 	struct bnx2x_queue_state_params q_params = {NULL};
+ 	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ 	struct bnx2x_queue_update_params *update_params;
+ 	struct pf_vf_bulletin_content *bulletin = NULL;
+ 	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
  	struct bnx2x *bp = netdev_priv(dev);
- 	int rc, q_logical_state;
+ 	struct bnx2x_vlan_mac_obj *vlan_obj;
+ 	unsigned long vlan_mac_flags = 0;
+ 	unsigned long ramrod_flags = 0;
  	struct bnx2x_virtf *vf = NULL;
- 	struct pf_vf_bulletin_content *bulletin = NULL;
+ 	unsigned long accept_flags;
+ 	int rc;
  
  	/* sanity and init */
  	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@@ -3468,104 -3477,118 +3488,118 @@@
  	/* update PF's copy of the VF's bulletin. No point in posting the vlan
  	 * to the VF since it doesn't have anything to do with it. But it is useful
  	 * to store it here in case the VF is not up yet and we can only
- 	 * configure the vlan later when it does.
+ 	 * configure the vlan later when it does. Treat vlan id 0 as a request
+ 	 * to remove the host tag.
  	 */
- 	bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ 	if (vlan > 0)
+ 		bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ 	else
+ 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
  	bulletin->vlan = vlan;
  
  	/* is vf initialized and queue set up? */
- 	q_logical_state =
- 		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
- 	if (vf->state == VF_ENABLED &&
- 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- 		/* configure the vlan in device on this vf's queue */
- 		unsigned long ramrod_flags = 0;
- 		unsigned long vlan_mac_flags = 0;
- 		struct bnx2x_vlan_mac_obj *vlan_obj =
- 			&bnx2x_leading_vfq(vf, vlan_obj);
- 		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- 		struct bnx2x_queue_state_params q_params = {NULL};
- 		struct bnx2x_queue_update_params *update_params;
+ 	if (vf->state != VF_ENABLED ||
+ 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ 		return rc;
  
- 		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- 		if (rc)
- 			return rc;
- 		memset(&ramrod_param, 0, sizeof(ramrod_param));
+ 	/* configure the vlan in device on this vf's queue */
+ 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ 	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ 	if (rc)
+ 		return rc;
  
- 		/* must lock vfpf channel to protect against vf flows */
- 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ 	/* must lock vfpf channel to protect against vf flows */
+ 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
  
- 		/* remove existing vlans */
- 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- 		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- 					  &ramrod_flags);
- 		if (rc) {
- 			BNX2X_ERR("failed to delete vlans\n");
- 			rc = -EINVAL;
- 			goto out;
- 		}
+ 	/* remove existing vlans */
+ 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ 				  &ramrod_flags);
+ 	if (rc) {
+ 		BNX2X_ERR("failed to delete vlans\n");
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+ 
+ 	/* need to remove/add the VF's accept_any_vlan bit */
+ 	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ 	if (vlan)
+ 		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ 	else
+ 		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ 
+ 	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ 			      accept_flags);
+ 	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ 	bnx2x_config_rx_mode(bp, &rx_ramrod);
+ 
+ 	/* configure the new vlan to device */
+ 	memset(&ramrod_param, 0, sizeof(ramrod_param));
+ 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ 	ramrod_param.vlan_mac_obj = vlan_obj;
+ 	ramrod_param.ramrod_flags = ramrod_flags;
+ 	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ 		&ramrod_param.user_req.vlan_mac_flags);
+ 	ramrod_param.user_req.u.vlan.vlan = vlan;
+ 	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ 	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ 	if (rc) {
+ 		BNX2X_ERR("failed to configure vlan\n");
+ 		rc =  -EINVAL;
+ 		goto out;
+ 	}
  
- 		/* send queue update ramrod to configure default vlan and silent
- 		 * vlan removal
+ 	/* send queue update ramrod to configure default vlan and silent
+ 	 * vlan removal
+ 	 */
+ 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ 	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ 	update_params = &q_params.params.update;
+ 	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ 		  &update_params->update_flags);
+ 	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ 		  &update_params->update_flags);
+ 	if (vlan == 0) {
+ 		/* if vlan is 0 then we want to leave the VF traffic
+ 		 * untagged, and leave the incoming traffic untouched
+ 		 * (i.e. do not remove any vlan tags).
  		 */
- 		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- 		q_params.cmd = BNX2X_Q_CMD_UPDATE;
- 		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- 		update_params = &q_params.params.update;
- 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ 		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ 			    &update_params->update_flags);
+ 		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ 			    &update_params->update_flags);
+ 	} else {
+ 		/* configure default vlan to vf queue and set silent
+ 		 * vlan removal (the vf remains unaware of this vlan).
+ 		 */
+ 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
  			  &update_params->update_flags);
- 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
  			  &update_params->update_flags);
+ 		update_params->def_vlan = vlan;
+ 		update_params->silent_removal_value =
+ 			vlan & VLAN_VID_MASK;
+ 		update_params->silent_removal_mask = VLAN_VID_MASK;
+ 	}
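
Note that both silent_removal_value and silent_removal_mask are restricted
to VLAN_VID_MASK, so the programmed match ignores the priority bits of the
tag and compares VIDs only. A user-space sketch of the comparison the
hardware is being set up to perform (the helper is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_VID_MASK 0x0fff  /* low 12 bits of the TCI carry the VID */

    /* Would a tag with this TCI be silently stripped, given the programmed
     * removal value/mask pair?  (Illustrative helper, not driver code.)
     */
    static bool silently_removed(uint16_t tci, uint16_t value, uint16_t mask)
    {
        return (tci & mask) == (value & mask);
    }

    int main(void)
    {
        uint16_t vlan  = 100;
        uint16_t value = vlan & VLAN_VID_MASK;
        uint16_t mask  = VLAN_VID_MASK;

        /* Same VID with different priority bits still matches. */
        printf("%d\n", silently_removed(0x6000 | 100, value, mask)); /* 1 */
        printf("%d\n", silently_removed(101, value, mask));          /* 0 */
        return 0;
    }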
  
- 		if (vlan == 0) {
- 			/* if vlan is 0 then we want to leave the VF traffic
- 			 * untagged, and leave the incoming traffic untouched
- 			 * (i.e. do not remove any vlan tags).
- 			 */
- 			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- 				    &update_params->update_flags);
- 			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- 				    &update_params->update_flags);
- 		} else {
- 			/* configure the new vlan to device */
- 			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- 			ramrod_param.vlan_mac_obj = vlan_obj;
- 			ramrod_param.ramrod_flags = ramrod_flags;
- 			ramrod_param.user_req.u.vlan.vlan = vlan;
- 			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- 			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- 			if (rc) {
- 				BNX2X_ERR("failed to configure vlan\n");
- 				rc =  -EINVAL;
- 				goto out;
- 			}
- 
- 			/* configure default vlan to vf queue and set silent
- 			 * vlan removal (the vf remains unaware of this vlan).
- 			 */
- 			update_params = &q_params.params.update;
- 			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- 				  &update_params->update_flags);
- 			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- 				  &update_params->update_flags);
- 			update_params->def_vlan = vlan;
- 		}
+ 	/* Update the Queue state */
+ 	rc = bnx2x_queue_state_change(bp, &q_params);
+ 	if (rc) {
+ 		BNX2X_ERR("Failed to configure default VLAN\n");
+ 		goto out;
+ 	}
  
- 		/* Update the Queue state */
- 		rc = bnx2x_queue_state_change(bp, &q_params);
- 		if (rc) {
- 			BNX2X_ERR("Failed to configure default VLAN\n");
- 			goto out;
- 		}
  
- 		/* clear the flag indicating that this VF needs its vlan
- 		 * (will only be set if the HV configured the Vlan before vf was
- 		 * up and we were called because the VF came up later
- 		 */
+ 	/* clear the flag indicating that this VF needs its vlan
+ 	 * (will only be set if the HV configured the Vlan before vf was
+ 	 * up and we were called because the VF came up later).
+ 	 */
  out:
- 		vf->cfg_flags &= ~VF_CFG_VLAN;
- 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- 	}
+ 	vf->cfg_flags &= ~VF_CFG_VLAN;
+ 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ 
  	return rc;
  }
  
@@@ -3616,7 -3639,7 +3650,7 @@@ enum sample_bulletin_result bnx2x_sampl
  
  	/* the mac address in bulletin board is valid and is new */
  	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
 -	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
 +	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
  		/* update new mac to net device */
  		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
  	}
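
The memcmp() -> ether_addr_equal() conversions in this merge are
behaviour-preserving: memcmp() is non-zero when the addresses differ, while
ether_addr_equal() is true when they match, hence the added '!'. A portable
stand-in that shows the equivalence (the real kernel helper uses word-sized
compares for speed):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Stand-in for the kernel helper: true when two MAC addresses are
     * byte-for-byte identical.
     */
    static bool ether_addr_equal(const uint8_t *a, const uint8_t *b)
    {
        return memcmp(a, b, ETH_ALEN) == 0;
    }

    int main(void)
    {
        uint8_t a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* memcmp(...) != 0 means "differs"; !ether_addr_equal(...) reads
         * the same way, which is why the substitution preserves behaviour.
         */
        printf("equal: %d\n", ether_addr_equal(a, b));
        return 0;
    }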
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index a5c84a7,8c213fa52..d72ab7e
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@@ -74,6 -74,7 +74,7 @@@ struct bnx2x_vf_queue 
  	/* VLANs object */
  	struct bnx2x_vlan_mac_obj	vlan_obj;
  	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */
+ 	unsigned long accept_flags;	/* last accept flags configured */
  
  	/* Queue Slow-path State object */
  	struct bnx2x_queue_sp_obj	sp_obj;
@@@ -268,7 -269,6 +269,7 @@@ struct bnx2x_virtf 
  	int leading_rss;
  
  	/* MCAST object */
 +	int mcast_list_len;
  	struct bnx2x_mcast_obj		mcast_obj;
  
  	/* RSS configuration object */
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index e5f7985,0756d7d..1b1ad31
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@@ -208,7 -208,7 +208,7 @@@ static int bnx2x_get_vf_id(struct bnx2
  		return -EINVAL;
  	}
  
- 	BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ 	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
  
  	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
  
@@@ -800,18 -800,14 +800,18 @@@ int bnx2x_vfpf_config_rss(struct bnx2x 
  	}
  
  	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
 -		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
 -			  resp->hdr.status);
 -		rc = -EINVAL;
 +		/* Since older drivers don't support this feature (and VF has
 +		 * no way of knowing other than failing this), don't propagate
 +		 * an error in this case.
 +		 */
 +		DP(BNX2X_MSG_IOV,
 +		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
 +		   resp->hdr.status);
  	}
  out:
  	bnx2x_vfpf_finalize(bp, &req->first_tlv);
  
 -	return 0;
 +	return rc;
  }
  
  int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@@ -1420,14 -1416,6 +1420,14 @@@ static void bnx2x_vf_mbx_setup_q(struc
  				setup_q->rxq.cache_line_log;
  			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
  
 +			/* rx setup - multicast engine */
 +			if (bnx2x_vfq_is_leading(q)) {
 +				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
 +
 +				rxq_params->mcast_engine_id = mcast_id;
 +				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
 +			}
 +
  			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
  						 q->index, q->sb_idx);
  		}
@@@ -1610,6 -1598,8 +1610,8 @@@ static void bnx2x_vfop_mbx_qfilters(str
  
  		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
  			unsigned long accept = 0;
+ 			struct pf_vf_bulletin_content *bulletin =
+ 				BP_VF_BULLETIN(bp, vf->index);
  
  			/* convert VF-PF if mask to bnx2x accept flags */
  			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@@ -1629,9 -1619,11 +1631,11 @@@
  				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
  
  			/* A packet arriving the vf's mac should be accepted
- 			 * with any vlan
+ 			 * with any vlan, unless a vlan has already been
+ 			 * configured.
  			 */
- 			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+ 			if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ 				__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
  
  			/* set rx-mode */
  			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@@ -1714,7 -1706,7 +1718,7 @@@ static void bnx2x_vf_mbx_set_q_filters(
  
  		/* ...and only the mac set by the ndo */
  		if (filters->n_mac_vlan_filters == 1 &&
 -		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
 +		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
  			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
  				  vf->abs_vfid);
  
@@@ -1722,6 -1714,21 +1726,21 @@@
  			goto response;
  		}
  	}
+ 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
+ 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ 		int i;
+ 
+ 		/* search for vlan filters */
+ 		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ 			if (filters->filters[i].flags &
+ 			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ 				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ 					  vf->abs_vfid);
+ 				vf->op_rc = -EPERM;
+ 				goto response;
+ 			}
+ 		}
+ 	}
  
  	/* verify vf_qid */
  	if (filters->vf_qid > vf_rxq_count(vf))
@@@ -1817,6 -1824,9 +1836,9 @@@ static void bnx2x_vf_mbx_update_rss(str
  	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
  
  	/* flags handled individually for backward/forward compatibility */
+ 	vf_op_params->rss_flags = 0;
+ 	vf_op_params->ramrod_flags = 0;
+ 
  	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
  		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
  	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --combined drivers/net/ethernet/broadcom/tg3.c
index d88ef55,15a66e4..c37e9f2
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/mii.h>
  #include <linux/phy.h>
  #include <linux/brcmphy.h>
 +#include <linux/if.h>
  #include <linux/if_vlan.h>
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@@ -95,10 -94,10 +95,10 @@@ static inline void _tg3_flag_clear(enu
  
  #define DRV_MODULE_NAME		"tg3"
  #define TG3_MAJ_NUM			3
 -#define TG3_MIN_NUM			134
 +#define TG3_MIN_NUM			136
  #define DRV_MODULE_VERSION	\
  	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
 -#define DRV_MODULE_RELDATE	"Sep 16, 2013"
 +#define DRV_MODULE_RELDATE	"Jan 03, 2014"
  
  #define RESET_KIND_SHUTDOWN	0
  #define RESET_KIND_INIT		1
@@@ -209,9 -208,6 +209,9 @@@
  
  #define TG3_RAW_IP_ALIGN 2
  
 +#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
 +#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
 +
  #define TG3_FW_UPDATE_TIMEOUT_SEC	5
  #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
  
@@@ -3952,41 -3948,32 +3952,41 @@@ static int tg3_load_tso_firmware(struc
  	return 0;
  }
  
 +/* tp->lock is held. */
 +static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
 +{
 +	u32 addr_high, addr_low;
 +
 +	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
 +	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
 +		    (mac_addr[4] <<  8) | mac_addr[5]);
 +
 +	if (index < 4) {
 +		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
 +		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
 +	} else {
 +		index -= 4;
 +		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
 +		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
 +	}
 +}
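
The new __tg3_set_one_mac_addr() helper packs a 6-byte MAC into the two
32-bit values the MAC block expects: bytes 0-1 in the high word, bytes 2-5
in the low word (indices 0-3 address the MAC_ADDR register bank, 4 and up
the MAC_EXTADDR bank). A stand-alone sketch of just the packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 6-byte MAC into the high/low register pair used above. */
    static void pack_mac(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
    {
        *hi = ((uint32_t)mac[0] << 8) | mac[1];
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  | mac[5];
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        uint32_t hi, lo;

        pack_mac(mac, &hi, &lo);
        /* prints high=0x0010 low=0x18aabbcc */
        printf("high=%#06x low=%#010x\n", (unsigned)hi, (unsigned)lo);
        return 0;
    }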
  
  /* tp->lock is held. */
  static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
  {
 -	u32 addr_high, addr_low;
 +	u32 addr_high;
  	int i;
  
  	for (i = 0; i < 4; i++) {
  		if (i == 1 && skip_mac_1)
  			continue;
 -		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
 -		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
 +		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
  	}
  
  	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  	    tg3_asic_rev(tp) == ASIC_REV_5704) {
 -		for (i = 0; i < 12; i++) {
 -			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
 -			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
 -		}
 +		for (i = 4; i < 16; i++)
 +			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
  	}
  
  	addr_high = (tp->dev->dev_addr[0] +
@@@ -4416,12 -4403,9 +4416,12 @@@ static void tg3_phy_copper_begin(struc
  			if (tg3_flag(tp, WOL_SPEED_100MB))
  				adv |= ADVERTISED_100baseT_Half |
  				       ADVERTISED_100baseT_Full;
 -			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
 -				adv |= ADVERTISED_1000baseT_Half |
 -				       ADVERTISED_1000baseT_Full;
 +			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
 +				if (!(tp->phy_flags &
 +				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
 +					adv |= ADVERTISED_1000baseT_Half;
 +				adv |= ADVERTISED_1000baseT_Full;
 +			}
  
  			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
  		} else {
@@@ -7638,7 -7622,7 +7638,7 @@@ static inline int tg3_4g_overflow_test(
  {
  	u32 base = (u32) mapping & 0xffffffff;
  
- 	return (base > 0xffffdcc0) && (base + len + 8 < base);
+ 	return base + len + 8 < base;
  }
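
The simplified 4G-boundary test relies on 32-bit wraparound: if
base + len + 8 overflows, the 32-bit sum compares below base. The dropped
'base > 0xffffdcc0' pre-filter had limited detection to buffers of roughly
9 KB or less. A sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Does a buffer whose low 32 address bits are 'base', of length 'len'
     * plus 8 bytes of slack (as above), wrap past a 4GB boundary?  In
     * 32-bit arithmetic the wrap shows up as the end comparing below the
     * start.
     */
    static int crosses_4g_boundary(uint32_t base, uint32_t len)
    {
        return base + len + 8 < base;
    }

    int main(void)
    {
        printf("%d\n", crosses_4g_boundary(0xfffffff0u, 0x100)); /* 1 */
        printf("%d\n", crosses_4g_boundary(0x1000u, 0x100));     /* 0 */
        return 0;
    }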
  
  /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@@ -8941,49 -8925,6 +8941,49 @@@ static void tg3_restore_pci_state(struc
  	}
  }
  
 +static void tg3_override_clk(struct tg3 *tp)
 +{
 +	u32 val;
 +
 +	switch (tg3_asic_rev(tp)) {
 +	case ASIC_REV_5717:
 +		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
 +		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
 +		     TG3_CPMU_MAC_ORIDE_ENABLE);
 +		break;
 +
 +	case ASIC_REV_5719:
 +	case ASIC_REV_5720:
 +		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 +		break;
 +
 +	default:
 +		return;
 +	}
 +}
 +
 +static void tg3_restore_clk(struct tg3 *tp)
 +{
 +	u32 val;
 +
 +	switch (tg3_asic_rev(tp)) {
 +	case ASIC_REV_5717:
 +		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
 +		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
 +		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
 +		break;
 +
 +	case ASIC_REV_5719:
 +	case ASIC_REV_5720:
 +		val = tr32(TG3_CPMU_CLCK_ORIDE);
 +		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 +		break;
 +
 +	default:
 +		return;
 +	}
 +}
 +
  /* tp->lock is held. */
  static int tg3_chip_reset(struct tg3 *tp)
  {
@@@ -9072,13 -9013,6 +9072,13 @@@
  		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
  	}
  
 +	/* Set the clock to the highest frequency to avoid timeouts. With link
 +	 * aware mode, the clock speed could be slow and bootcode does not
 +	 * complete within the expected time. Override the clock to allow the
 +	 * bootcode to finish sooner and then restore it.
 +	 */
 +	tg3_override_clk(tp);
 +
  	/* Manage gphy power for all CPMU absent PCIe devices. */
  	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
  		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@@ -9217,7 -9151,10 +9217,7 @@@
  		tw32(0x7c00, val | (1 << 25));
  	}
  
 -	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
 -		val = tr32(TG3_CPMU_CLCK_ORIDE);
 -		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 -	}
 +	tg3_restore_clk(tp);
  
  	/* Reprobe ASF enable state.  */
  	tg3_flag_clear(tp, ENABLE_ASF);
@@@ -9249,7 -9186,6 +9249,7 @@@
  
  static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
  static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
 +static void __tg3_set_rx_mode(struct net_device *);
  
  /* tp->lock is held. */
  static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@@ -9310,7 -9246,6 +9310,7 @@@ static int tg3_set_mac_addr(struct net_
  	}
  	spin_lock_bh(&tp->lock);
  	__tg3_set_mac_addr(tp, skip_mac_1);
 +	__tg3_set_rx_mode(dev);
  	spin_unlock_bh(&tp->lock);
  
  	return err;
@@@ -9699,20 -9634,6 +9699,20 @@@ static void __tg3_set_rx_mode(struct ne
  		tw32(MAC_HASH_REG_3, mc_filter[3]);
  	}
  
 +	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
 +		rx_mode |= RX_MODE_PROMISC;
 +	} else if (!(dev->flags & IFF_PROMISC)) {
 +		/* Add all entries into the mac addr filter list */
 +		int i = 0;
 +		struct netdev_hw_addr *ha;
 +
 +		netdev_for_each_uc_addr(ha, dev) {
 +			__tg3_set_one_mac_addr(tp, ha->addr,
 +					       i + TG3_UCAST_ADDR_IDX(tp));
 +			i++;
 +		}
 +	}
 +
  	if (rx_mode != tp->rx_mode) {
  		tp->rx_mode = rx_mode;
  		tw32_f(MAC_RX_MODE, rx_mode);
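
Together with the IFF_UNICAST_FLT flag set later in this patch, the added
block implements the usual unicast-filtering policy: program each secondary
unicast address into a spare MAC filter slot, and fall back to promiscuous
mode when the list outgrows the slots. A toy model of that decision (the
slot count is invented, not tg3's TG3_MAX_UCAST_ADDR()):

    #include <stdio.h>

    #define MAX_UCAST_SLOTS 3

    static int program_uc_list(int n_addrs)
    {
        if (n_addrs > MAX_UCAST_SLOTS)
            return 1;   /* promiscuous: more addresses than slots */

        for (int i = 0; i < n_addrs; i++)
            printf("programming address %d into slot %d\n", i, i);
        return 0;       /* filtered */
    }

    int main(void)
    {
        printf("promisc=%d\n", program_uc_list(2));
        printf("promisc=%d\n", program_uc_list(5));
        return 0;
    }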
@@@ -10045,7 -9966,6 +10045,7 @@@ static int tg3_reset_hw(struct tg3 *tp
  	if (tg3_asic_rev(tp) == ASIC_REV_5719)
  		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
  	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 +	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
  		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@@ -10831,7 -10751,6 +10831,7 @@@ static void tg3_periodic_fetch_stats(st
  
  	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
  	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
 +	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
  		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@@ -10960,13 -10879,6 +10960,13 @@@ static void tg3_timer(unsigned long __o
  		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
  			   tg3_flag(tp, 5780_CLASS)) {
  			tg3_serdes_parallel_detect(tp);
 +		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
 +			u32 cpmu = tr32(TG3_CPMU_STATUS);
 +			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
 +					 TG3_CPMU_STATUS_LINK_MASK);
 +
 +			if (link_up != tp->link_up)
 +				tg3_setup_phy(tp, false);
  		}
  
  		tp->timer_counter = tp->timer_multiplier;
@@@ -11834,6 -11746,8 +11834,6 @@@ static void tg3_get_nstats(struct tg3 *
  		get_stat64(&hw_stats->rx_frame_too_long_errors) +
  		get_stat64(&hw_stats->rx_undersize_packets);
  
 -	stats->rx_over_errors = old_stats->rx_over_errors +
 -		get_stat64(&hw_stats->rxbds_empty);
  	stats->rx_frame_errors = old_stats->rx_frame_errors +
  		get_stat64(&hw_stats->rx_align_errors);
  	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@@ -13680,13 -13594,14 +13680,13 @@@ static void tg3_self_test(struct net_de
  
  }
  
 -static int tg3_hwtstamp_ioctl(struct net_device *dev,
 -			      struct ifreq *ifr, int cmd)
 +static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
  {
  	struct tg3 *tp = netdev_priv(dev);
  	struct hwtstamp_config stmpconf;
  
  	if (!tg3_flag(tp, PTP_CAPABLE))
 -		return -EINVAL;
 +		return -EOPNOTSUPP;
  
  	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
  		return -EFAULT;
@@@ -13767,67 -13682,6 +13767,67 @@@
  		-EFAULT : 0;
  }
  
 +static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 +{
 +	struct tg3 *tp = netdev_priv(dev);
 +	struct hwtstamp_config stmpconf;
 +
 +	if (!tg3_flag(tp, PTP_CAPABLE))
 +		return -EOPNOTSUPP;
 +
 +	stmpconf.flags = 0;
 +	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
 +			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
 +
 +	switch (tp->rxptpctl) {
 +	case 0:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
 +		break;
 +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
 +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 +		break;
 +	default:
 +		WARN_ON_ONCE(1);
 +		return -ERANGE;
 +	}
 +
 +	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
 +		-EFAULT : 0;
 +}
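
The new SIOCGHWTSTAMP path is a pure reverse lookup: the tp->rxptpctl value
saved at SIOCSHWTSTAMP time is mapped back to the filter the user originally
requested. A compressed sketch of the idea with placeholder constants (not
the real TG3_RX_PTP_CTL_* values):

    #include <stdio.h>

    enum filter {
        FILTER_NONE,
        FILTER_V1_L4_EVENT,
        FILTER_V2_EVENT,
        FILTER_UNKNOWN,
    };

    static enum filter rxptpctl_to_filter(unsigned int rxptpctl)
    {
        switch (rxptpctl) {
        case 0x0: return FILTER_NONE;
        case 0x1: return FILTER_V1_L4_EVENT;
        case 0x2: return FILTER_V2_EVENT;
        default:  return FILTER_UNKNOWN; /* driver WARNs, returns -ERANGE */
        }
    }

    int main(void)
    {
        printf("%d\n", rxptpctl_to_filter(0x0)); /* FILTER_NONE */
        printf("%d\n", rxptpctl_to_filter(0x7)); /* FILTER_UNKNOWN */
        return 0;
    }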
 +
  static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  {
  	struct mii_ioctl_data *data = if_mii(ifr);
@@@ -13881,10 -13735,7 +13881,10 @@@
  		return err;
  
  	case SIOCSHWTSTAMP:
 -		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
 +		return tg3_hwtstamp_set(dev, ifr);
 +
 +	case SIOCGHWTSTAMP:
 +		return tg3_hwtstamp_get(dev, ifr);
  
  	default:
  		/* do nothing */
@@@ -15005,8 -14856,7 +15005,8 @@@ static void tg3_get_eeprom_hw_cfg(struc
  	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  		u32 nic_cfg, led_cfg;
 -		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
 +		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
 +		u32 nic_phy_id, ver, eeprom_phy_id;
  		int eeprom_phy_serdes = 0;
  
  		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@@ -15023,11 -14873,6 +15023,11 @@@
  		if (tg3_asic_rev(tp) == ASIC_REV_5785)
  			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
  
 +		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 +		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
 +		    tg3_asic_rev(tp) == ASIC_REV_5720)
 +			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
 +
  		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
  		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
  			eeprom_phy_serdes = 1;
@@@ -15180,9 -15025,6 +15180,9 @@@
  			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
  		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
  			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
 +
 +		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
 +			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
  	}
  done:
  	if (tg3_flag(tp, WOL_CAP))
@@@ -15278,11 -15120,9 +15278,11 @@@ static void tg3_phy_init_link_config(st
  {
  	u32 adv = ADVERTISED_Autoneg;
  
 -	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 -		adv |= ADVERTISED_1000baseT_Half |
 -		       ADVERTISED_1000baseT_Full;
 +	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 +		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
 +			adv |= ADVERTISED_1000baseT_Half;
 +		adv |= ADVERTISED_1000baseT_Full;
 +	}
  
  	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  		adv |= ADVERTISED_100baseT_Half |
@@@ -16630,7 -16470,6 +16630,7 @@@ static int tg3_get_invariants(struct tg
  
  	/* Set these bits to enable statistics workaround. */
  	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 +	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
  		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@@ -16773,9 -16612,6 +16773,9 @@@
  	else
  		tg3_flag_clear(tp, POLL_SERDES);
  
 +	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
 +		tg3_flag_set(tp, POLL_CPMU_LINK);
 +
  	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
  	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
  	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@@ -17697,7 -17533,6 +17697,7 @@@ static int tg3_init_one(struct pci_dev 
  		features |= NETIF_F_LOOPBACK;
  
  	dev->hw_features |= features;
 +	dev->priv_flags |= IFF_UNICAST_FLT;
  
  	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
  	    !tg3_flag(tp, TSO_CAPABLE) &&
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 17fe50b,56e0415..b97e35c
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@@ -228,6 -228,25 +228,25 @@@ struct tp_params 
  
  	uint32_t dack_re;            /* DACK timer resolution */
  	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
+ 
+ 	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
+ 	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
+ 
+ 	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
+ 	 * subset of the set of fields which may be present in the Compressed
+ 	 * Filter Tuple portion of filters and TCP TCB connections.  The
+ 	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ 	 * Since a variable number of fields may or may not be present, their
+ 	 * shifted field positions within the Compressed Filter Tuple may
+ 	 * vary, or not even be present if the field isn't selected in
+ 	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
+ 	 * places we store their offsets here, or a -1 if the field isn't
+ 	 * present.
+ 	 */
+ 	int vlan_shift;
+ 	int vnic_shift;
+ 	int port_shift;
+ 	int protocol_shift;
  };
  
  struct vpd_params {
@@@ -919,12 -938,15 +938,14 @@@ int t4_seeprom_wp(struct adapter *adapt
  int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
  int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
  unsigned int t4_flash_cfg_addr(struct adapter *adapter);
 -int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
  int t4_get_fw_version(struct adapter *adapter, u32 *vers);
  int t4_get_tp_version(struct adapter *adapter, u32 *vers);
  int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
  	       const u8 *fw_data, unsigned int fw_size,
  	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
  int t4_prep_adapter(struct adapter *adapter);
+ int t4_init_tp_params(struct adapter *adap);
+ int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
  int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
  void t4_fatal_err(struct adapter *adapter);
  int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@@ -957,6 -979,13 +978,6 @@@ int t4_fw_hello(struct adapter *adap, u
  int t4_fw_bye(struct adapter *adap, unsigned int mbox);
  int t4_early_init(struct adapter *adap, unsigned int mbox);
  int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
 -int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
 -int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
 -int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 -		  const u8 *fw_data, unsigned int size, int force);
 -int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
 -		      unsigned int mtype, unsigned int maddr,
 -		      u32 *finiver, u32 *finicsum, u32 *cfcsum);
  int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
  			  unsigned int cache_line_size);
  int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --combined drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4274543,cc3511a..47ffa64
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@@ -1630,8 -1630,7 +1630,8 @@@ static void do_gro(struct sge_eth_rxq *
  	skb->ip_summed = CHECKSUM_UNNECESSARY;
  	skb_record_rx_queue(skb, rxq->rspq.idx);
  	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
 -		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 +		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
 +			     PKT_HASH_TYPE_L3);
  
  	if (unlikely(pkt->vlan_ex)) {
  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@@ -1687,8 -1686,7 +1687,8 @@@ int t4_ethrx_handler(struct sge_rspq *q
  	skb->protocol = eth_type_trans(skb, q->netdev);
  	skb_record_rx_queue(skb, q->idx);
  	if (skb->dev->features & NETIF_F_RXHASH)
 -		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 +		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
 +			     PKT_HASH_TYPE_L3);
  
  	rxq->stats.pkts++;
  
@@@ -2583,7 -2581,7 +2583,7 @@@ static int t4_sge_init_soft(struct adap
  	#undef READ_FL_BUF
  
  	if (fl_small_pg != PAGE_SIZE ||
- 	    (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ 	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
  				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
  		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
  			fl_small_pg, fl_large_pg);
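
Relaxing the comparison from <= to < allows the large free-list page size to
equal the small one; it must still be a power of two, which
'fl_large_pg & (fl_large_pg - 1)' verifies (the expression is zero exactly
when a single bit is set). A sketch of the validation (the helper name is
invented):

    #include <stdbool.h>
    #include <stdio.h>

    /* Large FL page size must be zero (disabled) or a power of two no
     * smaller than the small page size.
     */
    static bool fl_large_pg_ok(unsigned int large, unsigned int small)
    {
        return large == 0 ||
               (large >= small && (large & (large - 1)) == 0);
    }

    int main(void)
    {
        printf("%d\n", fl_large_pg_ok(65536, 4096)); /* 1 */
        printf("%d\n", fl_large_pg_ok(4096, 4096));  /* 1: now allowed */
        printf("%d\n", fl_large_pg_ok(6144, 4096));  /* 0: not a power of 2 */
        return 0;
    }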
diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9903a66,e1413ea..a396475
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@@ -38,8 -38,6 +38,8 @@@
  #include "t4_regs.h"
  #include "t4fw_api.h"
  
 +static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 +			 const u8 *fw_data, unsigned int size, int force);
  /**
   *	t4_wait_op_done_val - wait until an operation is completed
   *	@adapter: the adapter performing the operation
@@@ -1072,6 -1070,62 +1072,6 @@@ unsigned int t4_flash_cfg_addr(struct a
  }
  
  /**
 - *	t4_load_cfg - download config file
 - *	@adap: the adapter
 - *	@cfg_data: the cfg text file to write
 - *	@size: text file size
 - *
 - *	Write the supplied config text file to the card's serial flash.
 - */
 -int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
 -{
 -	int ret, i, n;
 -	unsigned int addr;
 -	unsigned int flash_cfg_start_sec;
 -	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
 -
 -	addr = t4_flash_cfg_addr(adap);
 -	flash_cfg_start_sec = addr / SF_SEC_SIZE;
 -
 -	if (size > FLASH_CFG_MAX_SIZE) {
 -		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
 -			FLASH_CFG_MAX_SIZE);
 -		return -EFBIG;
 -	}
 -
 -	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
 -			 sf_sec_size);
 -	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
 -				     flash_cfg_start_sec + i - 1);
 -	/*
 -	 * If size == 0 then we're simply erasing the FLASH sectors associated
 -	 * with the on-adapter Firmware Configuration File.
 -	 */
 -	if (ret || size == 0)
 -		goto out;
 -
 -	/* this will write to the flash up to SF_PAGE_SIZE at a time */
 -	for (i = 0; i < size; i += SF_PAGE_SIZE) {
 -		if ((size - i) <  SF_PAGE_SIZE)
 -			n = size - i;
 -		else
 -			n = SF_PAGE_SIZE;
 -		ret = t4_write_flash(adap, addr, n, cfg_data);
 -		if (ret)
 -			goto out;
 -
 -		addr += SF_PAGE_SIZE;
 -		cfg_data += SF_PAGE_SIZE;
 -	}
 -
 -out:
 -	if (ret)
 -		dev_err(adap->pdev_dev, "config file %s failed %d\n",
 -			(size == 0 ? "clear" : "download"), ret);
 -	return ret;
 -}
 -
 -/**
   *	t4_load_fw - download firmware
   *	@adap: the adapter
   *	@fw_data: the firmware image to write
@@@ -2756,7 -2810,7 +2756,7 @@@ int t4_fw_reset(struct adapter *adap, u
   *	be doing.  The only way out of this state is to RESTART the firmware
   *	...
   */
 -int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 +static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
  {
  	int ret = 0;
  
@@@ -2821,7 -2875,7 +2821,7 @@@
   *	    the chip since older firmware won't recognize the PCIE_FW.HALT
   *	    flag and automatically RESET itself on startup.
   */
 -int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
 +static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
  {
  	if (reset) {
  		/*
@@@ -2884,8 -2938,8 +2884,8 @@@
   *	positive errno indicates that the adapter is ~probably~ intact, a
   *	negative errno indicates that things are looking bad ...
   */
 -int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 -		  const u8 *fw_data, unsigned int size, int force)
 +static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 +			 const u8 *fw_data, unsigned int size, int force)
  {
  	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
  	int reset, ret;
@@@ -2910,6 -2964,78 +2910,6 @@@
  	return t4_fw_restart(adap, mbox, reset);
  }
  
 -
 -/**
 - *	t4_fw_config_file - setup an adapter via a Configuration File
 - *	@adap: the adapter
 - *	@mbox: mailbox to use for the FW command
 - *	@mtype: the memory type where the Configuration File is located
 - *	@maddr: the memory address where the Configuration File is located
 - *	@finiver: return value for CF [fini] version
 - *	@finicsum: return value for CF [fini] checksum
 - *	@cfcsum: return value for CF computed checksum
 - *
 - *	Issue a command to get the firmware to process the Configuration
 - *	File located at the specified mtype/maddress.  If the Configuration
 - *	File is processed successfully and return value pointers are
 - *	provided, the Configuration File "[fini] section version and
 - *	checksum values will be returned along with the computed checksum.
 - *	It's up to the caller to decide how it wants to respond to the
 - *	checksums not matching but it recommended that a prominant warning
 - *	be emitted in order to help people rapidly identify changed or
 - *	corrupted Configuration Files.
 - *
 - *	Also note that it's possible to modify things like "niccaps",
 - *	"toecaps",etc. between processing the Configuration File and telling
 - *	the firmware to use the new configuration.  Callers which want to
 - *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
 - *	Configuration Files if they want to do this.
 - */
 -int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
 -		      unsigned int mtype, unsigned int maddr,
 -		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
 -{
 -	struct fw_caps_config_cmd caps_cmd;
 -	int ret;
 -
 -	/*
 -	 * Tell the firmware to process the indicated Configuration File.
 -	 * If there are no errors and the caller has provided return value
 -	 * pointers for the [fini] section version, checksum and computed
 -	 * checksum, pass those back to the caller.
 -	 */
 -	memset(&caps_cmd, 0, sizeof(caps_cmd));
 -	caps_cmd.op_to_write =
 -		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 -		      FW_CMD_REQUEST |
 -		      FW_CMD_READ);
 -	caps_cmd.cfvalid_to_len16 =
 -		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
 -		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
 -		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
 -		      FW_LEN16(caps_cmd));
 -	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
 -	if (ret < 0)
 -		return ret;
 -
 -	if (finiver)
 -		*finiver = ntohl(caps_cmd.finiver);
 -	if (finicsum)
 -		*finicsum = ntohl(caps_cmd.finicsum);
 -	if (cfcsum)
 -		*cfcsum = ntohl(caps_cmd.cfcsum);
 -
 -	/*
 -	 * And now tell the firmware to use the configuration we just loaded.
 -	 */
 -	caps_cmd.op_to_write =
 -		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 -		      FW_CMD_REQUEST |
 -		      FW_CMD_WRITE);
 -	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
 -	return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
 -}
 -
  /**
   *	t4_fixup_host_params - fix up host-dependent parameters
   *	@adap: the adapter
@@@ -3682,6 -3808,109 +3682,109 @@@ int t4_prep_adapter(struct adapter *ada
  	return 0;
  }
  
+ /**
+  *      t4_init_tp_params - initialize adap->params.tp
+  *      @adap: the adapter
+  *
+  *      Initialize various fields of the adapter's TP Parameters structure.
+  */
+ int t4_init_tp_params(struct adapter *adap)
+ {
+ 	int chan;
+ 	u32 v;
+ 
+ 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ 	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+ 
+ 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ 	for (chan = 0; chan < NCHAN; chan++)
+ 		adap->params.tp.tx_modq[chan] = chan;
+ 
+ 	/* Cache the adapter's Compressed Filter Mode and global Ingress
+ 	 * Configuration.
+ 	 */
+ 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ 			 &adap->params.tp.vlan_pri_map, 1,
+ 			 TP_VLAN_PRI_MAP);
+ 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ 			 &adap->params.tp.ingress_config, 1,
+ 			 TP_INGRESS_CONFIG);
+ 
+ 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ 	 * shift positions of several elements of the Compressed Filter Tuple
+ 	 * for this adapter which we need frequently ...
+ 	 */
+ 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ 							       F_PROTOCOL);
+ 
+ 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+ 	 */
+ 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ 		adap->params.tp.vnic_shift = -1;
+ 
+ 	return 0;
+ }
+ 
+ /**
+  *      t4_filter_field_shift - calculate filter field shift
+  *      @adap: the adapter
+  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+  *
+  *      Return the shift position of a filter field within the Compressed
+  *      Filter Tuple.  The filter field is specified via its selection bit
+  *      within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
+  */
+ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+ {
+ 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ 	unsigned int sel;
+ 	int field_shift;
+ 
+ 	if ((filter_mode & filter_sel) == 0)
+ 		return -1;
+ 
+ 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ 		switch (filter_mode & sel) {
+ 		case F_FCOE:
+ 			field_shift += W_FT_FCOE;
+ 			break;
+ 		case F_PORT:
+ 			field_shift += W_FT_PORT;
+ 			break;
+ 		case F_VNIC_ID:
+ 			field_shift += W_FT_VNIC_ID;
+ 			break;
+ 		case F_VLAN:
+ 			field_shift += W_FT_VLAN;
+ 			break;
+ 		case F_TOS:
+ 			field_shift += W_FT_TOS;
+ 			break;
+ 		case F_PROTOCOL:
+ 			field_shift += W_FT_PROTOCOL;
+ 			break;
+ 		case F_ETHERTYPE:
+ 			field_shift += W_FT_ETHERTYPE;
+ 			break;
+ 		case F_MACMATCH:
+ 			field_shift += W_FT_MACMATCH;
+ 			break;
+ 		case F_MPSHITTYPE:
+ 			field_shift += W_FT_MPSHITTYPE;
+ 			break;
+ 		case F_FRAGMENTATION:
+ 			field_shift += W_FT_FRAGMENTATION;
+ 			break;
+ 		}
+ 	}
+ 	return field_shift;
+ }
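
t4_filter_field_shift() computes a field's bit offset inside the Compressed
Filter Tuple by walking the selection bits below the requested one in
ascending order and accumulating the width of every field enabled in the
filter mode. A user-space sketch with invented bit values and widths (the
driver uses the F_* selection bits and W_FT_* widths):

    #include <stdio.h>

    #define F_PORT   0x1
    #define F_VLAN   0x2
    #define F_PROTO  0x4

    static int filter_field_shift(unsigned int filter_mode,
                                  unsigned int filter_sel)
    {
        /* Illustrative per-bit field widths, indexed by bit position. */
        static const int width[] = { [0] = 3, [1] = 17, [2] = 8 };
        int shift = 0;

        if (!(filter_mode & filter_sel))
            return -1; /* field not present in this filter mode */

        /* Sum the widths of every enabled field below the requested one. */
        for (unsigned int sel = 1, i = 0; sel < filter_sel; sel <<= 1, i++)
            if (filter_mode & sel)
                shift += width[i];
        return shift;
    }

    int main(void)
    {
        /* PORT and PROTO enabled, VLAN not: PROTO sits right after PORT. */
        unsigned int mode = F_PORT | F_PROTO;

        printf("proto shift = %d\n", filter_field_shift(mode, F_PROTO)); /* 3 */
        printf("vlan shift  = %d\n", filter_field_shift(mode, F_VLAN));  /* -1 */
        return 0;
    }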
+ 
  int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
  {
  	u8 addr[6];
diff --combined drivers/net/ethernet/emulex/benet/be_main.c
index b5c238a,bf40fda..3acf137
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@@ -287,7 -287,7 +287,7 @@@ static int be_mac_addr_set(struct net_d
  	/* The MAC change did not happen, either due to lack of privilege
  	 * or PF didn't pre-provision.
  	 */
 -	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
 +	if (!ether_addr_equal(addr->sa_data, mac)) {
  		status = -EPERM;
  		goto err;
  	}
@@@ -1581,7 -1581,7 +1581,7 @@@ static void be_rx_compl_process(struct 
  	skb->protocol = eth_type_trans(skb, netdev);
  	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
  	if (netdev->features & NETIF_F_RXHASH)
 -		skb->rxhash = rxcp->rss_hash;
 +		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
  	skb_mark_napi_id(skb, napi);
  
  	if (rxcp->vlanf)
@@@ -1639,7 -1639,7 +1639,7 @@@ static void be_rx_compl_process_gro(str
  	skb->ip_summed = CHECKSUM_UNNECESSARY;
  	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
  	if (adapter->netdev->features & NETIF_F_RXHASH)
 -		skb->rxhash = rxcp->rss_hash;
 +		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
  	skb_mark_napi_id(skb, napi);
  
  	if (rxcp->vlanf)
@@@ -2744,13 -2744,16 +2744,16 @@@ static int be_rx_qs_create(struct be_ad
  		if (!BEx_chip(adapter))
  			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
  						RSS_ENABLE_UDP_IPV6;
+ 	} else {
+ 		/* Disable RSS if only the default RX queue is created */
+ 		adapter->rss_flags = RSS_ENABLE_NONE;
+ 	}
  
- 		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 				       128);
- 		if (rc) {
- 			adapter->rss_flags = 0;
- 			return rc;
- 		}
+ 	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 			       128);
+ 	if (rc) {
+ 		adapter->rss_flags = RSS_ENABLE_NONE;
+ 		return rc;
  	}
  
  	/* First time posting */
@@@ -3124,11 -3127,11 +3127,11 @@@ static void BEx_get_resources(struct be
  {
  	struct pci_dev *pdev = adapter->pdev;
  	bool use_sriov = false;
+ 	int max_vfs;
  
- 	if (BE3_chip(adapter) && sriov_want(adapter)) {
- 		int max_vfs;
+ 	max_vfs = pci_sriov_get_totalvfs(pdev);
  
- 		max_vfs = pci_sriov_get_totalvfs(pdev);
+ 	if (BE3_chip(adapter) && sriov_want(adapter)) {
  		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
  		use_sriov = res->max_vfs;
  	}
@@@ -3159,7 -3162,11 +3162,11 @@@
  					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
  	res->max_rx_qs = res->max_rss_qs + 1;
  
- 	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+ 	if (be_physfn(adapter))
+ 		res->max_evt_qs = (max_vfs > 0) ?
+ 					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ 	else
+ 		res->max_evt_qs = 1;
  
  	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
  	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@@ -4205,7 -4212,7 +4212,7 @@@ static int be_ctrl_init(struct be_adapt
  	spin_lock_init(&adapter->mcc_lock);
  	spin_lock_init(&adapter->mcc_cq_lock);
  
- 	init_completion(&adapter->flash_compl);
+ 	init_completion(&adapter->et_cmd_compl);
  	pci_save_state(adapter->pdev);
  	return 0;
  
diff --combined drivers/net/ethernet/freescale/fec_main.c
index 05cd81a,50bb71c..6530177
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@@ -428,6 -428,8 +428,8 @@@ fec_enet_start_xmit(struct sk_buff *skb
  	/* If this was the last BD in the ring, start at the beginning again. */
  	bdp = fec_enet_get_nextdesc(bdp, fep);
  
+ 	skb_tx_timestamp(skb);
+ 
  	fep->cur_tx = bdp;
  
  	if (fep->cur_tx == fep->dirty_tx)
@@@ -436,8 -438,6 +438,6 @@@
  	/* Trigger transmission start */
  	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  
- 	skb_tx_timestamp(skb);
- 
  	return NETDEV_TX_OK;
  }
  
@@@ -1679,12 -1679,8 +1679,12 @@@ static int fec_enet_ioctl(struct net_de
  	if (!phydev)
  		return -ENODEV;
  
 -	if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
 -		return fec_ptp_ioctl(ndev, rq, cmd);
 +	if (fep->bufdesc_ex) {
 +		if (cmd == SIOCSHWTSTAMP)
 +			return fec_ptp_set(ndev, rq);
 +		if (cmd == SIOCGHWTSTAMP)
 +			return fec_ptp_get(ndev, rq);
 +	}
  
  	return phy_mii_ioctl(phydev, rq, cmd);
  }
diff --combined drivers/net/ethernet/intel/e1000e/netdev.c
index 051d158,c30d41d..d6570b2
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@@ -5790,7 -5790,7 +5790,7 @@@ static int e1000_mii_ioctl(struct net_d
   * specified. Matching the kind of event packet is not supported, with the
   * exception of "all V2 events regardless of level 2 or 4".
   **/
 -static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
 +static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
  {
  	struct e1000_adapter *adapter = netdev_priv(netdev);
  	struct hwtstamp_config config;
@@@ -5825,14 -5825,6 +5825,14 @@@
  			    sizeof(config)) ? -EFAULT : 0;
  }
  
 +static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
 +{
 +	struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
 +			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
 +}
 +
  static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  {
  	switch (cmd) {
@@@ -5841,9 -5833,7 +5841,9 @@@
  	case SIOCSMIIREG:
  		return e1000_mii_ioctl(netdev, ifr, cmd);
  	case SIOCSHWTSTAMP:
 -		return e1000e_hwtstamp_ioctl(netdev, ifr);
 +		return e1000e_hwtstamp_set(netdev, ifr);
 +	case SIOCGHWTSTAMP:
 +		return e1000e_hwtstamp_get(netdev, ifr);
  	default:
  		return -EOPNOTSUPP;
  	}
@@@ -6184,7 -6174,7 +6184,7 @@@ static int __e1000_resume(struct pci_de
  	return 0;
  }
  
- #ifdef CONFIG_PM_SLEEP
+ #ifdef CONFIG_PM
  static int e1000_suspend(struct device *dev)
  {
  	struct pci_dev *pdev = to_pci_dev(dev);
@@@ -6203,7 -6193,7 +6203,7 @@@ static int e1000_resume(struct device *
  
  	return __e1000_resume(pdev);
  }
- #endif /* CONFIG_PM_SLEEP */
+ #endif /* CONFIG_PM */
  
  #ifdef CONFIG_PM_RUNTIME
  static int e1000_runtime_suspend(struct device *dev)
diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9ce07f3,72084f7..359f6e6
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@@ -291,7 -291,9 +291,9 @@@ static int ixgbe_pci_sriov_disable(stru
  {
  	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
  	int err;
+ #ifdef CONFIG_PCI_IOV
  	u32 current_flags = adapter->flags;
+ #endif
  
  	err = ixgbe_disable_sriov(adapter);
  
@@@ -715,7 -717,8 +717,7 @@@ static int ixgbe_set_vf_mac_addr(struc
  	}
  
  	if (adapter->vfinfo[vf].pf_set_mac &&
 -	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
 -		   ETH_ALEN)) {
 +	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
  		e_warn(drv,
  		       "VF %d attempted to override administratively set MAC address\n"
  		       "Reload the VF driver to resume operations\n",
diff --combined drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 3010abb,cc68657..3205861
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@@ -14,7 -14,9 +14,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 - * MA  02111-1307, USA.
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * The full GNU General Public License is included in this distribution
   * in the file called "COPYING".
@@@ -1602,13 -1604,13 +1602,13 @@@ netxen_process_lro(struct netxen_adapte
  	u32 seq_number;
  	u8 vhdr_len = 0;
  
- 	if (unlikely(ring > adapter->max_rds_rings))
+ 	if (unlikely(ring >= adapter->max_rds_rings))
  		return NULL;
  
  	rds_ring = &recv_ctx->rds_rings[ring];
  
  	index = netxen_get_lro_sts_refhandle(sts_data0);
- 	if (unlikely(index > rds_ring->num_desc))
+ 	if (unlikely(index >= rds_ring->num_desc))
  		return NULL;
  
  	buffer = &rds_ring->rx_buf_arr[index];
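
Both hunks above fix the same off-by-one: valid indices into an array of n
entries are 0 .. n-1, so the reject test must be '>=', not '>', which
previously let index == n read one element past the end. A minimal
illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Accept an index only if it addresses one of the n valid entries;
     * equivalently, reject when index >= n.
     */
    static bool index_ok(unsigned int index, unsigned int n)
    {
        return index < n;
    }

    int main(void)
    {
        printf("%d\n", index_ok(7, 8)); /* 1: last valid element */
        printf("%d\n", index_ok(8, 8)); /* 0: one past the end */
        return 0;
    }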
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 4afdef0c,ff80cd8..35d4876
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@@ -38,8 -38,8 +38,8 @@@
  
  #define _QLCNIC_LINUX_MAJOR 5
  #define _QLCNIC_LINUX_MINOR 3
 -#define _QLCNIC_LINUX_SUBVERSION 52
 -#define QLCNIC_LINUX_VERSIONID  "5.3.52"
 +#define _QLCNIC_LINUX_SUBVERSION 53
 +#define QLCNIC_LINUX_VERSIONID  "5.3.53"
  #define QLCNIC_DRV_IDC_VER  0x01
  #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
  		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@@ -115,10 -115,6 +115,10 @@@ enum qlcnic_queue_type 
  #define QLCNIC_VNIC_MODE	0xFF
  #define QLCNIC_DEFAULT_MODE	0x0
  
 +/* Virtual NIC function count */
 +#define QLC_DEFAULT_VNIC_COUNT	8
 +#define QLC_84XX_VNIC_COUNT	16
 +
  /*
   * Following are the states of the Phantom. Phantom will set them and
   * Host will read to check if the fields are correct.
@@@ -378,7 -374,7 +378,7 @@@ struct qlcnic_rx_buffer 
  
  #define QLCNIC_INTR_DEFAULT			0x04
  #define QLCNIC_CONFIG_INTR_COALESCE		3
 -#define QLCNIC_DEV_INFO_SIZE			1
 +#define QLCNIC_DEV_INFO_SIZE			2
  
  struct qlcnic_nic_intr_coalesce {
  	u8	type;
@@@ -466,10 -462,8 +466,10 @@@ struct qlcnic_hardware_context 
  	u16 max_rx_ques;
  	u16 max_mtu;
  	u32 msg_enable;
 -	u16 act_pci_func;
 +	u16 total_nic_func;
  	u16 max_pci_func;
 +	u32 max_vnic_func;
 +	u32 total_pci_func;
  
  	u32 capabilities;
  	u32 extra_capability[3];
@@@ -493,6 -487,7 +493,7 @@@
  	struct qlcnic_mailbox *mailbox;
  	u8 extend_lb_time;
  	u8 phys_port_id[ETH_ALEN];
+ 	u8 lb_mode;
  };
  
  struct qlcnic_adapter_stats {
@@@ -584,6 -579,8 +585,8 @@@ struct qlcnic_host_tx_ring 
  	dma_addr_t phys_addr;
  	dma_addr_t hw_cons_phys_addr;
  	struct netdev_queue *txq;
+ 	/* Lock to protect Tx descriptors cleanup */
+ 	spinlock_t tx_clean_lock;
  } ____cacheline_internodealigned_in_smp;
  
  /*
@@@ -794,10 -791,9 +797,10 @@@ struct qlcnic_cardrsp_tx_ctx 
  #define QLCNIC_MAC_VLAN_ADD	3
  #define QLCNIC_MAC_VLAN_DEL	4
  
 -struct qlcnic_mac_list_s {
 +struct qlcnic_mac_vlan_list {
  	struct list_head list;
  	uint8_t mac_addr[ETH_ALEN+2];
 +	u16 vlan_id;
  };
  
  /* MAC Learn */
@@@ -815,6 -811,7 +818,7 @@@
  
  #define QLCNIC_ILB_MODE		0x1
  #define QLCNIC_ELB_MODE		0x2
+ #define QLCNIC_LB_MODE_MASK	0x3
  
  #define QLCNIC_LINKEVENT	0x1
  #define QLCNIC_LB_RESPONSE	0x2
@@@ -863,7 -860,7 +867,7 @@@
  #define QLCNIC_FW_CAP2_HW_LRO_IPV6		BIT_3
  #define QLCNIC_FW_CAPABILITY_SET_DRV_VER	BIT_5
  #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
 -#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_8
 +#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
  
  /* module types */
  #define LINKEVENT_MODULE_NOT_PRESENT			1
@@@ -1100,7 -1097,6 +1104,6 @@@ struct qlcnic_adapter 
  	struct qlcnic_filter_hash rx_fhash;
  	struct list_head vf_mc_list;
  
- 	spinlock_t tx_clean_lock;
  	spinlock_t mac_learn_lock;
  	/* spinlock for catching rcv filters for eswitch traffic */
  	spinlock_t rx_mac_learn_lock;
@@@ -1644,9 -1640,7 +1647,9 @@@ int qlcnic_setup_netdev(struct qlcnic_a
  void qlcnic_set_netdev_features(struct qlcnic_adapter *,
  				struct qlcnic_esw_func_cfg *);
  void qlcnic_sriov_vf_schedule_multi(struct net_device *);
 -void qlcnic_vf_add_mc_list(struct net_device *, u16);
 +int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
 +int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
 +			     u16 *);
  
  /*
   * QLOGIC Board information
@@@ -2145,26 -2139,4 +2148,26 @@@ static inline bool qlcnic_sriov_vf_chec
  
  	return status;
  }
 +
 +static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
 +{
 +	unsigned short device = adapter->pdev->device;
 +
 +	return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
 +}
 +
 +static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
 +{
 +	unsigned short device = adapter->pdev->device;
 +
 +	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
 +}
 +
 +static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
 +{
 +	if (qlcnic_84xx_check(adapter))
 +		return QLC_84XX_VNIC_COUNT;
 +	else
 +		return QLC_DEFAULT_VNIC_COUNT;
 +}
  #endif				/* __QLCNIC_H_ */
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b3fd160,f776f99..03eb2ad
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@@ -15,7 -15,6 +15,7 @@@
  
  #define RSS_HASHTYPE_IP_TCP		0x3
  #define QLC_83XX_FW_MBX_CMD		0
 +#define QLC_SKIP_INACTIVE_PCI_REGS	7
  
  static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
  	{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@@ -35,7 -34,7 +35,7 @@@
  	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
  	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
  	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
 -	{QLCNIC_CMD_GET_PCI_INFO, 1, 66},
 +	{QLCNIC_CMD_GET_PCI_INFO, 1, 129},
  	{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
  	{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
  	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@@ -69,7 -68,7 +69,7 @@@
  	{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
  	{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
  	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
 -	{QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
 +	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
  };
  
  const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@@ -290,7 -289,6 +290,7 @@@ int qlcnic_83xx_setup_intr(struct qlcni
  		if (qlcnic_sriov_vf_check(adapter))
  			return -EINVAL;
  		num_msix = 1;
 +		adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
  	}
  	/* setup interrupt mapping table for fw */
  	ahw->intr_tbl = vzalloc(num_msix *
@@@ -317,12 -315,12 +317,12 @@@
  	return 0;
  }
  
 -inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
 +static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
  {
  	writel(0, adapter->tgt_mask_reg);
  }
  
 -inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
 +static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
  {
  	if (adapter->tgt_mask_reg)
  		writel(1, adapter->tgt_mask_reg);
@@@ -342,7 -340,7 +342,7 @@@ void qlcnic_83xx_disable_intr(struct ql
  	writel(1, sds_ring->crb_intr_mask);
  }
  
 -inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
 +static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
  						    *adapter)
  {
  	u32 mask;
@@@ -639,7 -637,7 +639,7 @@@ int qlcnic_83xx_get_port_info(struct ql
  void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
 -	u16 act_pci_fn = ahw->act_pci_func;
 +	u16 act_pci_fn = ahw->total_nic_func;
  	u16 count;
  
  	ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@@ -1500,7 -1498,8 +1500,7 @@@ int  qlcnic_83xx_set_led(struct net_dev
  	return err;
  }
  
 -void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
 -				       int enable)
 +void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
  {
  	struct qlcnic_cmd_args cmd;
  	int status;
@@@ -1508,21 -1507,21 +1508,21 @@@
  	if (qlcnic_sriov_vf_check(adapter))
  		return;
  
 -	if (enable) {
 +	if (enable)
  		status = qlcnic_alloc_mbx_args(&cmd, adapter,
  					       QLCNIC_CMD_INIT_NIC_FUNC);
 -		if (status)
 -			return;
 -
 -		cmd.req.arg[1] = BIT_0 | BIT_31;
 -	} else {
 +	else
  		status = qlcnic_alloc_mbx_args(&cmd, adapter,
  					       QLCNIC_CMD_STOP_NIC_FUNC);
 -		if (status)
 -			return;
  
 -		cmd.req.arg[1] = BIT_0 | BIT_31;
 -	}
 +	if (status)
 +		return;
 +
 +	cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
 +
 +	if (adapter->dcb)
 +		cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
 +
  	status = qlcnic_issue_cmd(adapter, &cmd);
  	if (status)
  		dev_err(&adapter->pdev->dev,
@@@ -1618,7 -1617,7 +1618,7 @@@ int qlcnic_83xx_nic_set_promisc(struct 
  
  	cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
  	qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
 -	cmd->req.arg[1] = (mode ? 1 : 0) | temp;
 +	cmd->req.arg[1] = mode | temp;
  	err = qlcnic_issue_cmd(adapter, cmd);
  	if (!err)
  		return err;
@@@ -1685,12 -1684,6 +1685,6 @@@ int qlcnic_83xx_loopback_test(struct ne
  		}
  	} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
  
- 	/* Make sure carrier is off and queue is stopped during loopback */
- 	if (netif_running(netdev)) {
- 		netif_carrier_off(netdev);
- 		netif_tx_stop_all_queues(netdev);
- 	}
- 
  	ret = qlcnic_do_lb_test(adapter, mode);
  
  	qlcnic_83xx_clear_lb_mode(adapter, mode);
@@@ -2122,6 -2115,7 +2116,7 @@@ static void qlcnic_83xx_handle_link_aen
  	ahw->link_autoneg = MSB(MSW(data[3]));
  	ahw->module_type = MSB(LSW(data[3]));
  	ahw->has_link_events = 1;
+ 	ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
  	qlcnic_advert_link_change(adapter, link_status);
  }
  
@@@ -2274,37 -2268,11 +2269,37 @@@ out
  	return err;
  }
  
 +int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
 +			     u16 *nic, u16 *fcoe, u16 *iscsi)
 +{
 +	struct device *dev = &adapter->pdev->dev;
 +	int err = 0;
 +
 +	switch (type) {
 +	case QLCNIC_TYPE_NIC:
 +		(*nic)++;
 +		break;
 +	case QLCNIC_TYPE_FCOE:
 +		(*fcoe)++;
 +		break;
 +	case QLCNIC_TYPE_ISCSI:
 +		(*iscsi)++;
 +		break;
 +	default:
 +		dev_err(dev, "%s: Unknown PCI type[%x]\n",
 +			__func__, type);
 +		err = -EIO;
 +	}
 +
 +	return err;
 +}
 +
  int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
  			     struct qlcnic_pci_info *pci_info)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct device *dev = &adapter->pdev->dev;
 +	u16 nic = 0, fcoe = 0, iscsi = 0;
  	struct qlcnic_cmd_args cmd;
  	int i, err = 0, j = 0;
  	u32 temp;
@@@ -2315,20 -2283,16 +2310,20 @@@
  
  	err = qlcnic_issue_cmd(adapter, &cmd);
  
 -	ahw->act_pci_func = 0;
 +	ahw->total_nic_func = 0;
  	if (err == QLCNIC_RCODE_SUCCESS) {
  		ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
 -		for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
 +		for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
  			pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
  			pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
  			i++;
 +			if (!pci_info->active) {
 +				i += QLC_SKIP_INACTIVE_PCI_REGS;
 +				continue;
 +			}
  			pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
 -			if (pci_info->type == QLCNIC_TYPE_NIC)
 -				ahw->act_pci_func++;
 +			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
 +						       &nic, &fcoe, &iscsi);
  			temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
  			pci_info->default_port = temp;
  			i++;
@@@ -2346,13 -2310,6 +2341,13 @@@
  		err = -EIO;
  	}
  
 +	ahw->total_nic_func = nic;
 +	ahw->total_pci_func = nic + fcoe + iscsi;
 +	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
 +		dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
 +			__func__, ahw->total_nic_func, ahw->total_pci_func);
 +		err = -EIO;
 +	}
  	qlcnic_free_mbx_args(&cmd);
  
  	return err;
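
The qlcnic_83xx_hw.c hunks above replace the single act_pci_func counter with separate NIC/FCoE/iSCSI tallies and reject unknown function types before deriving total_nic_func and total_pci_func. A minimal userspace sketch of that tallying pattern (type codes and names here are illustrative, not the driver's real values):

#include <stdio.h>

/* Illustrative stand-ins for the QLCNIC_TYPE_* constants */
enum pci_func_type { TYPE_NIC = 1, TYPE_FCOE = 2, TYPE_ISCSI = 3 };

static int tally_func_type(int type, int *nic, int *fcoe, int *iscsi)
{
	switch (type) {
	case TYPE_NIC:
		(*nic)++;
		break;
	case TYPE_FCOE:
		(*fcoe)++;
		break;
	case TYPE_ISCSI:
		(*iscsi)++;
		break;
	default:
		fprintf(stderr, "unknown PCI function type %d\n", type);
		return -1;
	}
	return 0;
}

int main(void)
{
	int types[] = { TYPE_NIC, TYPE_NIC, TYPE_FCOE, TYPE_ISCSI };
	int nic = 0, fcoe = 0, iscsi = 0;
	unsigned int i;

	for (i = 0; i < sizeof(types) / sizeof(types[0]); i++)
		tally_func_type(types[i], &nic, &fcoe, &iscsi);

	/* total_nic_func / total_pci_func as the hunks derive them */
	printf("nic=%d total=%d\n", nic, nic + fcoe + iscsi);
	return 0;
}

Keeping the three counts separate is what allows the sanity check that both totals are nonzero before the driver uses them.
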
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index a215e0f,ad1531a..6373f60
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@@ -127,7 -127,7 +127,7 @@@
  struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
  				     struct qlcnic_host_rds_ring *, u16, u16);
  
 -inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
 +static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
  				  struct qlcnic_host_tx_ring *tx_ring)
  {
  	if (qlcnic_check_multi_tx(adapter) &&
@@@ -144,13 -144,13 +144,13 @@@ static inline void qlcnic_disable_tx_in
  		writel(1, tx_ring->crb_intr_mask);
  }
  
 -inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
 +static inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
  				       struct qlcnic_host_tx_ring *tx_ring)
  {
  	writel(0, tx_ring->crb_intr_mask);
  }
  
 -inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
 +static inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
  					struct qlcnic_host_tx_ring *tx_ring)
  {
  	writel(1, tx_ring->crb_intr_mask);
@@@ -202,7 -202,7 +202,7 @@@ static struct qlcnic_filter *qlcnic_fin
  	struct hlist_node *n;
  
  	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 -		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
 +		if (ether_addr_equal(tmp_fil->faddr, addr) &&
  		    tmp_fil->vlan_id == vlan_id)
  			return tmp_fil;
  	}
@@@ -346,7 -346,7 +346,7 @@@ static void qlcnic_send_filter(struct q
  	head = &(adapter->fhash.fhead[hindex]);
  
  	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 -		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 +		if (ether_addr_equal(tmp_fil->faddr, &src_addr) &&
  		    tmp_fil->vlan_id == vlan_id) {
  			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
  				qlcnic_change_filter(adapter, &src_addr,
@@@ -689,6 -689,10 +689,10 @@@ void qlcnic_advert_link_change(struct q
  		adapter->ahw->linkup = 0;
  		netif_carrier_off(netdev);
  	} else if (!adapter->ahw->linkup && linkup) {
+ 		/* Do not advertise Link up if the port is in loopback mode */
+ 		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ 			return;
+ 
  		netdev_info(netdev, "NIC Link is up\n");
  		adapter->ahw->linkup = 1;
  		netif_carrier_on(netdev);
@@@ -778,7 -782,7 +782,7 @@@ static int qlcnic_process_cmd_ring(stru
  	struct net_device *netdev = adapter->netdev;
  	struct qlcnic_skb_frag *frag;
  
- 	if (!spin_trylock(&adapter->tx_clean_lock))
+ 	if (!spin_trylock(&tx_ring->tx_clean_lock))
  		return 1;
  
  	sw_consumer = tx_ring->sw_consumer;
@@@ -807,8 -811,9 +811,9 @@@
  			break;
  	}
  
+ 	tx_ring->sw_consumer = sw_consumer;
+ 
  	if (count && netif_running(netdev)) {
- 		tx_ring->sw_consumer = sw_consumer;
  		smp_mb();
  		if (netif_tx_queue_stopped(tx_ring->txq) &&
  		    netif_carrier_ok(netdev)) {
@@@ -834,7 -839,8 +839,8 @@@
  	 */
  	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
  	done = (sw_consumer == hw_consumer);
- 	spin_unlock(&adapter->tx_clean_lock);
+ 
+ 	spin_unlock(&tx_ring->tx_clean_lock);
  
  	return done;
  }
@@@ -1460,7 -1466,8 +1466,7 @@@ int qlcnic_82xx_napi_add(struct qlcnic_
  	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
  		sds_ring = &recv_ctx->sds_rings[ring];
  		if (qlcnic_check_multi_tx(adapter) &&
 -		    !adapter->ahw->diag_test &&
 -		    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
 +		    !adapter->ahw->diag_test) {
  			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
  				       NAPI_POLL_WEIGHT);
  		} else {
@@@ -1533,7 -1540,8 +1539,7 @@@ void qlcnic_82xx_napi_enable(struct qlc
  
  	if (qlcnic_check_multi_tx(adapter) &&
  	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
 -	    !adapter->ahw->diag_test &&
 -	    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
 +	    !adapter->ahw->diag_test) {
  		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
  			tx_ring = &adapter->tx_ring[ring];
  			napi_enable(&tx_ring->napi);
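
Two themes run through the qlcnic_io.c hunks: open-coded memcmp() MAC comparisons become ether_addr_equal()/ether_addr_equal_unaligned(), and the adapter-wide tx_clean_lock moves into each Tx ring so cleaning one ring no longer serializes the others. A small pthread-based sketch of the per-ring locking shape (userspace stand-ins, not the kernel API; note pthread_spin_trylock() returns nonzero on failure, the inverse of the kernel's spin_trylock()):

#include <pthread.h>
#include <stdio.h>

/* Each Tx ring carries its own cleanup lock, replacing the old
 * single adapter-wide tx_clean_lock. */
struct tx_ring {
	pthread_spinlock_t tx_clean_lock;
	unsigned int sw_consumer;
};

static int process_cmd_ring(struct tx_ring *ring)
{
	if (pthread_spin_trylock(&ring->tx_clean_lock))
		return 1;			/* ring busy, try later */
	ring->sw_consumer++;			/* descriptor reclaim goes here */
	pthread_spin_unlock(&ring->tx_clean_lock);
	return 0;
}

int main(void)
{
	struct tx_ring ring = { .sw_consumer = 0 };

	pthread_spin_init(&ring.tx_clean_lock, PTHREAD_PROCESS_PRIVATE);
	process_cmd_ring(&ring);
	printf("sw_consumer=%u\n", ring.sw_consumer);
	pthread_spin_destroy(&ring.tx_clean_lock);
	return 0;
}
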
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d131ec1,b8a245a..eeec83a
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@@ -308,12 -308,12 +308,12 @@@ int qlcnic_read_mac_addr(struct qlcnic_
  
  static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
  {
 -	struct qlcnic_mac_list_s *cur;
 +	struct qlcnic_mac_vlan_list *cur;
  	struct list_head *head;
  
  	list_for_each(head, &adapter->mac_list) {
 -		cur = list_entry(head, struct qlcnic_mac_list_s, list);
 -		if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
 +		cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
 +		if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
  			qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
  						  0, QLCNIC_MAC_DEL);
  			list_del(&cur->list);
@@@ -337,7 -337,7 +337,7 @@@ static int qlcnic_set_mac(struct net_de
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EINVAL;
  
 -	if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
 +	if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
  		return 0;
  
  	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@@ -646,7 -646,8 +646,7 @@@ int qlcnic_enable_msix(struct qlcnic_ad
  			} else {
  				adapter->ahw->num_msix = num_msix;
  				if (qlcnic_check_multi_tx(adapter) &&
 -				    !adapter->ahw->diag_test &&
 -				    (adapter->drv_tx_rings > 1))
 +				    !adapter->ahw->diag_test)
  					drv_sds_rings = num_msix - drv_tx_rings;
  				else
  					drv_sds_rings = num_msix;
@@@ -799,26 -800,25 +799,26 @@@ static void qlcnic_cleanup_pci_map(stru
  
  static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
  {
 +	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct qlcnic_pci_info *pci_info;
  	int ret;
  
  	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
 -		switch (adapter->ahw->port_type) {
 +		switch (ahw->port_type) {
  		case QLCNIC_GBE:
 -			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
 +			ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
  			break;
  		case QLCNIC_XGBE:
 -			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
 +			ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
  			break;
  		}
  		return 0;
  	}
  
 -	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
 +	if (ahw->op_mode == QLCNIC_MGMT_FUNC)
  		return 0;
  
 -	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
 +	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
  	if (!pci_info)
  		return -ENOMEM;
  
@@@ -846,13 -846,12 +846,13 @@@ static bool qlcnic_port_eswitch_cfg_cap
  
  int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
  {
 +	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct qlcnic_pci_info *pci_info;
  	int i, id = 0, ret = 0, j = 0;
  	u16 act_pci_func;
  	u8 pfn;
  
 -	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
 +	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
  	if (!pci_info)
  		return -ENOMEM;
  
@@@ -860,7 -859,7 +860,7 @@@
  	if (ret)
  		goto err_pci_info;
  
 -	act_pci_func = adapter->ahw->act_pci_func;
 +	act_pci_func = ahw->total_nic_func;
  
  	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
  				 act_pci_func, GFP_KERNEL);
@@@ -876,10 -875,10 +876,10 @@@
  		goto err_npars;
  	}
  
 -	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 +	for (i = 0; i < ahw->max_vnic_func; i++) {
  		pfn = pci_info[i].id;
  
 -		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
 +		if (pfn >= ahw->max_vnic_func) {
  			ret = QL_STATUS_INVALID_PARAM;
  			goto err_eswitch;
  		}
@@@ -1347,7 -1346,7 +1347,7 @@@ int qlcnic_set_default_offload_settings
  	if (adapter->need_fw_reset)
  		return 0;
  
 -	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 +	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
  		if (!adapter->npars[i].eswitch_status)
  			continue;
  
@@@ -1410,7 -1409,7 +1410,7 @@@ int qlcnic_reset_npar_config(struct qlc
  			return 0;
  
  	/* Set the NPAR config data after FW reset */
 -	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 +	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
  		npar = &adapter->npars[i];
  		pci_func = npar->pci_func;
  		if (!adapter->npars[i].eswitch_status)
@@@ -1757,7 -1756,6 +1757,6 @@@ void __qlcnic_down(struct qlcnic_adapte
  	if (qlcnic_sriov_vf_check(adapter))
  		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
  	smp_mb();
- 	spin_lock(&adapter->tx_clean_lock);
  	netif_carrier_off(netdev);
  	adapter->ahw->linkup = 0;
  	netif_tx_disable(netdev);
@@@ -1778,7 -1776,6 +1777,6 @@@
  
  	for (ring = 0; ring < adapter->drv_tx_rings; ring++)
  		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
- 	spin_unlock(&adapter->tx_clean_lock);
  }
  
  /* Usage: During suspend and firmware recovery module */
@@@ -2038,7 -2035,7 +2036,7 @@@ qlcnic_reset_context(struct qlcnic_adap
  void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
 -	u16 act_pci_fn = ahw->act_pci_func;
 +	u16 act_pci_fn = ahw->total_nic_func;
  	u16 count;
  
  	ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@@ -2173,6 -2170,7 +2171,7 @@@ int qlcnic_alloc_tx_rings(struct qlcnic
  		}
  		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
  		tx_ring->cmd_buf_arr = cmd_buf_arr;
+ 		spin_lock_init(&tx_ring->tx_clean_lock);
  	}
  
  	if (qlcnic_83xx_check(adapter) ||
@@@ -2213,6 -2211,7 +2212,6 @@@ qlcnic_probe(struct pci_dev *pdev, cons
  	struct qlcnic_hardware_context *ahw;
  	int err, pci_using_dac = -1;
  	char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 -	struct qlcnic_dcb *dcb;
  
  	if (pdev->is_virtfn)
  		return -ENODEV;
@@@ -2290,8 -2289,7 +2289,8 @@@
  		goto err_out_free_wq;
  
  	adapter->dev_rst_time = jiffies;
 -	adapter->ahw->revision_id = pdev->revision;
 +	ahw->revision_id = pdev->revision;
 +	ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
  	if (qlcnic_mac_learn == FDB_MAC_LEARN)
  		adapter->fdb_mac_learn = true;
  	else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@@ -2300,7 -2298,6 +2299,6 @@@
  	rwlock_init(&adapter->ahw->crb_lock);
  	mutex_init(&adapter->ahw->mem_lock);
  
- 	spin_lock_init(&adapter->tx_clean_lock);
  	INIT_LIST_HEAD(&adapter->mac_list);
  
  	qlcnic_register_dcb(adapter);
@@@ -2336,6 -2333,10 +2334,6 @@@
  
  		adapter->flags |= QLCNIC_NEED_FLR;
  
 -		dcb = adapter->dcb;
 -
 -		if (dcb && qlcnic_dcb_attach(dcb))
 -			qlcnic_clear_dcb_ops(dcb);
  	} else if (qlcnic_83xx_check(adapter)) {
  		qlcnic_83xx_check_vf(adapter, ent);
  		adapter->portnum = adapter->ahw->pci_func;
@@@ -2364,8 -2365,6 +2362,8 @@@
  		goto err_out_free_hw;
  	}
  
 +	qlcnic_dcb_enable(adapter->dcb);
 +
  	if (qlcnic_read_mac_addr(adapter))
  		dev_warn(&pdev->dev, "failed to read mac addr\n");
  
@@@ -2499,11 -2498,13 +2497,11 @@@ static void qlcnic_remove(struct pci_de
  	qlcnic_cancel_idc_work(adapter);
  	ahw = adapter->ahw;
  
 -	qlcnic_dcb_free(adapter->dcb);
 -
  	unregister_netdev(netdev);
  	qlcnic_sriov_cleanup(adapter);
  
  	if (qlcnic_83xx_check(adapter)) {
 -		qlcnic_83xx_register_nic_idc_func(adapter, 0);
 +		qlcnic_83xx_initialize_nic(adapter, 0);
  		cancel_delayed_work_sync(&adapter->idc_aen_work);
  		qlcnic_83xx_free_mbx_intr(adapter);
  		qlcnic_83xx_detach_mailbox_work(adapter);
@@@ -2511,8 -2512,6 +2509,8 @@@
  		kfree(ahw->fw_info);
  	}
  
 +	qlcnic_dcb_free(adapter->dcb);
 +
  	qlcnic_detach(adapter);
  
  	if (adapter->npars != NULL)
@@@ -2641,7 -2640,7 +2639,7 @@@ void qlcnic_alloc_lb_filters_mem(struc
  	if (adapter->fhash.fmax && adapter->fhash.fhead)
  		return;
  
 -	act_pci_func = adapter->ahw->act_pci_func;
 +	act_pci_func = adapter->ahw->total_nic_func;
  	spin_lock_init(&adapter->mac_learn_lock);
  	spin_lock_init(&adapter->rx_mac_learn_lock);
  
@@@ -3724,6 -3723,12 +3722,6 @@@ int qlcnic_validate_rings(struct qlcnic
  		return -EINVAL;
  	}
  
 -	if (ring_cnt < 2) {
 -		netdev_err(netdev,
 -			   "%s rings value should not be lower than 2\n", buf);
 -		return -EINVAL;
 -	}
 -
  	if (!is_power_of_2(ring_cnt)) {
  		netdev_err(netdev, "%s rings value should be a power of 2\n",
  			   buf);
@@@ -3781,7 -3786,8 +3779,7 @@@ int qlcnic_setup_rings(struct qlcnic_ad
  	}
  
  	if (qlcnic_83xx_check(adapter)) {
 -		/* register for NIC IDC AEN Events */
 -		qlcnic_83xx_register_nic_idc_func(adapter, 1);
 +		qlcnic_83xx_initialize_nic(adapter, 1);
  		err = qlcnic_83xx_setup_mbx_intr(adapter);
  		qlcnic_83xx_disable_mbx_poll(adapter);
  		if (err) {
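
Throughout qlcnic_main.c the compile-time QLCNIC_MAX_PCI_FUNC bound gives way to ahw->max_vnic_func, probed once at attach time, and allocations and loop bounds are sized from that runtime value. A hedged sketch of the shape (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pci_info {
	unsigned short id;
	unsigned short type;
};

/* Size the table from a probed per-adapter count instead of a
 * driver-wide compile-time maximum. */
static struct pci_info *alloc_pci_info(unsigned int max_vnic_func)
{
	return calloc(max_vnic_func, sizeof(struct pci_info));
}

int main(void)
{
	unsigned int max_vnic_func = 8;	/* would come from the hardware */
	struct pci_info *info = alloc_pci_info(max_vnic_func);

	if (!info)
		return 1;
	printf("allocated %u entries\n", max_vnic_func);
	free(info);
	return 0;
}
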
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 98b621f,024f816..d14d9a1
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@@ -9,7 -9,7 +9,7 @@@
  #include "qlcnic.h"
  #include <linux/types.h>
  
 -#define QLCNIC_SRIOV_VF_MAX_MAC 1
 +#define QLCNIC_SRIOV_VF_MAX_MAC 8
  #define QLC_VF_MIN_TX_RATE	100
  #define QLC_VF_MAX_TX_RATE	9999
  
@@@ -64,10 -64,9 +64,10 @@@ static int qlcnic_sriov_pf_cal_res_limi
  {
  	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
  	struct qlcnic_resources *res = &sriov->ff_max;
 -	u32 temp, num_vf_macs, num_vfs, max;
 +	u16 num_macs = sriov->num_allowed_vlans + 1;
  	int ret = -EIO, vpid, id;
  	struct qlcnic_vport *vp;
 +	u32 num_vfs, max, temp;
  
  	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
  	if (vpid < 0)
@@@ -76,22 -75,17 +76,26 @@@
  	num_vfs = sriov->num_vfs;
  	max = num_vfs + 1;
  	info->bit_offsets = 0xffff;
 +	info->max_tx_ques = res->num_tx_queues / max;
 +
 +	if (qlcnic_83xx_pf_check(adapter))
 +		num_macs = 1;
 +
+ 	info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
 -	num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
+ 
  	if (adapter->ahw->pci_func == func) {
 -		temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
 -		info->max_rx_ucast_mac_filters = temp;
 -		temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
 -		info->max_tx_mac_filters = temp;
  		info->min_tx_bw = 0;
  		info->max_tx_bw = MAX_BW;
++
 +		temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
 +		info->max_rx_ucast_mac_filters = temp;
 +		temp = res->num_tx_mac_filters - num_macs * num_vfs;
 +		info->max_tx_mac_filters = temp;
 +		temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
 +		temp = res->num_rx_mcast_mac_filters - temp;
 +		info->max_rx_mcast_mac_filters = temp;
 +
+ 		info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
  	} else {
  		id = qlcnic_sriov_func_to_index(adapter, func);
  		if (id < 0)
@@@ -99,10 -93,9 +103,13 @@@
  		vp = sriov->vf_info[id].vp;
  		info->min_tx_bw = vp->min_tx_bw;
  		info->max_tx_bw = vp->max_tx_bw;
 -		info->max_rx_ucast_mac_filters = num_vf_macs;
 -		info->max_tx_mac_filters = num_vf_macs;
++
 +		info->max_rx_ucast_mac_filters = num_macs;
 +		info->max_tx_mac_filters = num_macs;
 +		temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
 +		info->max_rx_mcast_mac_filters = temp;
++
+ 		info->max_tx_ques = QLCNIC_SINGLE_RING;
  	}
  
  	info->max_rx_ip_addr = res->num_destip / max;
@@@ -140,25 -133,6 +147,25 @@@ static void qlcnic_sriov_pf_set_ff_max_
  	ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
  }
  
 +static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
 +					 struct qlcnic_info *npar_info)
 +{
 +	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 +	int temp, total_fn;
 +
 +	temp = npar_info->max_rx_mcast_mac_filters;
 +	total_fn = sriov->num_vfs + 1;
 +
 +	temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
 +	sriov->num_allowed_vlans = temp - 1;
 +
 +	if (qlcnic_83xx_pf_check(adapter))
 +		sriov->num_allowed_vlans = 1;
 +
 +	netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
 +		    sriov->num_allowed_vlans);
 +}
 +
  static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
  				    struct qlcnic_info *npar_info)
  {
@@@ -192,7 -166,6 +199,7 @@@
  	npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
  	npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
  
 +	qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
  	qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
  	dev_info(&adapter->pdev->dev,
  		 "\n\ttotal_pf: %d,\n"
@@@ -431,8 -404,6 +438,8 @@@ static int qlcnic_pci_sriov_disable(str
  
  	qlcnic_sriov_pf_disable(adapter);
  
 +	qlcnic_sriov_free_vlans(adapter);
 +
  	qlcnic_sriov_pf_cleanup(adapter);
  
  	/* After disabling SRIOV re-init the driver in default mode
@@@ -541,8 -512,6 +548,8 @@@ static int __qlcnic_pci_sriov_enable(st
  	if (err)
  		goto del_flr_queue;
  
 +	qlcnic_sriov_alloc_vlans(adapter);
 +
  	err = qlcnic_sriov_pf_enable(adapter, num_vfs);
  	return err;
  
@@@ -640,7 -609,7 +647,7 @@@ static int qlcnic_sriov_set_vf_acl(stru
  
  	if (vp->vlan_mode == QLC_PVID_MODE) {
  		cmd.req.arg[2] |= BIT_6;
 -		cmd.req.arg[3] |= vp->vlan << 8;
 +		cmd.req.arg[3] |= vp->pvid << 8;
  	}
  
  	err = qlcnic_issue_cmd(adapter, &cmd);
@@@ -675,13 -644,10 +682,13 @@@ static int qlcnic_sriov_pf_channel_cfg_
  	struct qlcnic_vf_info *vf = trans->vf;
  	struct qlcnic_vport *vp = vf->vp;
  	struct qlcnic_adapter *adapter;
 +	struct qlcnic_sriov *sriov;
  	u16 func = vf->pci_func;
 +	size_t size;
  	int err;
  
  	adapter = vf->adapter;
 +	sriov = adapter->ahw->sriov;
  
  	if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
  		err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@@ -691,12 -657,8 +698,12 @@@
  				qlcnic_sriov_pf_config_vport(adapter, 0, func);
  		}
  	} else {
 -		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
 -			vp->vlan = 0;
 +		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
 +			size = sizeof(*vf->sriov_vlans);
 +			size = size * sriov->num_allowed_vlans;
 +			memset(vf->sriov_vlans, 0, size);
 +		}
 +
  		err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
  	}
  
@@@ -718,23 -680,20 +725,23 @@@ err_out
  }
  
  static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
 -				       struct qlcnic_vport *vp,
 -				       u16 func, u16 vlan, u8 op)
 +				       struct qlcnic_vf_info *vf,
 +				       u16 vlan, u8 op)
  {
  	struct qlcnic_cmd_args cmd;
  	struct qlcnic_macvlan_mbx mv;
 +	struct qlcnic_vport *vp;
  	u8 *addr;
  	int err;
  	u32 *buf;
  	int vpid;
  
 +	vp = vf->vp;
 +
  	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
  		return -ENOMEM;
  
 -	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
 +	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
  	if (vpid < 0) {
  		err = -EINVAL;
  		goto out;
@@@ -778,35 -737,6 +785,35 @@@ static int qlcnic_sriov_validate_create
  	return 0;
  }
  
 +static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
 +					     struct qlcnic_vf_info *vf,
 +					     int opcode)
 +{
 +	struct qlcnic_sriov *sriov;
 +	u16 vlan;
 +	int i;
 +
 +	sriov = adapter->ahw->sriov;
 +
 +	mutex_lock(&vf->vlan_list_lock);
 +	if (vf->num_vlan) {
 +		for (i = 0; i < sriov->num_allowed_vlans; i++) {
 +			vlan = vf->sriov_vlans[i];
 +			if (vlan)
 +				qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
 +							    opcode);
 +		}
 +	}
 +	mutex_unlock(&vf->vlan_list_lock);
 +
 +	if (vf->vp->vlan_mode != QLC_PVID_MODE) {
 +		if (qlcnic_83xx_pf_check(adapter) &&
 +		    qlcnic_sriov_check_any_vlan(vf))
 +			return;
 +		qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
 +	}
 +}
 +
  static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
  					     struct qlcnic_cmd_args *cmd)
  {
@@@ -814,6 -744,7 +821,6 @@@
  	struct qlcnic_adapter *adapter = vf->adapter;
  	struct qlcnic_rcv_mbx_out *mbx_out;
  	int err;
 -	u16 vlan;
  
  	err = qlcnic_sriov_validate_create_rx_ctx(cmd);
  	if (err) {
@@@ -824,10 -755,12 +831,10 @@@
  	cmd->req.arg[6] = vf->vp->handle;
  	err = qlcnic_issue_cmd(adapter, cmd);
  
  	if (!err) {
  		mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
  		vf->rx_ctx_id = mbx_out->ctx_id;
 -		qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
 -					    vlan, QLCNIC_MAC_ADD);
 +		qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
  	} else {
  		vf->rx_ctx_id = 0;
  	}
@@@ -911,6 -844,7 +918,6 @@@ static int qlcnic_sriov_pf_del_rx_ctx_c
  	struct qlcnic_vf_info *vf = trans->vf;
  	struct qlcnic_adapter *adapter = vf->adapter;
  	int err;
 -	u16 vlan;
  
  	err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
  	if (err) {
@@@ -918,7 -852,9 +925,7 @@@
  		return err;
  	}
  
 -	vlan = vf->vp->vlan;
 -	qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
 -				    vlan, QLCNIC_MAC_DEL);
 +	qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
  	cmd->req.arg[1] |= vf->vp->handle << 16;
  	err = qlcnic_issue_cmd(adapter, cmd);
  
@@@ -1185,7 -1121,7 +1192,7 @@@ static int qlcnic_sriov_validate_cfg_ma
  		cmd->req.arg[1] &= ~0x7;
  		new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
  			 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
 -		cmd->req.arg[3] |= vp->vlan << 16;
 +		cmd->req.arg[3] |= vp->pvid << 16;
  		cmd->req.arg[1] |= new_op;
  	}
  
@@@ -1255,10 -1191,8 +1262,10 @@@ static int qlcnic_sriov_pf_get_acl_cmd(
  	struct qlcnic_vport *vp = vf->vp;
  	u8 cmd_op, mode = vp->vlan_mode;
  	struct qlcnic_adapter *adapter;
 +	struct qlcnic_sriov *sriov;
  
  	adapter = vf->adapter;
 +	sriov = adapter->ahw->sriov;
  
  	cmd_op = trans->req_hdr->cmd_op;
  	cmd->rsp.arg[0] |= 1 << 25;
@@@ -1272,10 -1206,10 +1279,10 @@@
  	switch (mode) {
  	case QLC_GUEST_VLAN_MODE:
  		cmd->rsp.arg[1] = mode | 1 << 8;
 -		cmd->rsp.arg[2] = 1 << 16;
 +		cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
  		break;
  	case QLC_PVID_MODE:
 -		cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
 +		cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
  		break;
  	}
  
@@@ -1283,27 -1217,24 +1290,27 @@@
  }
  
  static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
 -					  struct qlcnic_vf_info *vf)
 -
 +					  struct qlcnic_vf_info *vf,
 +					  struct qlcnic_cmd_args *cmd)
  {
 -	struct qlcnic_vport *vp = vf->vp;
 +	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 +	u16 vlan;
  
 -	if (!vp->vlan)
 +	if (!qlcnic_sriov_check_any_vlan(vf))
  		return -EINVAL;
  
 +	vlan = cmd->req.arg[1] >> 16;
  	if (!vf->rx_ctx_id) {
 -		vp->vlan = 0;
 +		qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
  		return 0;
  	}
  
 -	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
 -				    vp->vlan, QLCNIC_MAC_DEL);
 -	vp->vlan = 0;
 -	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
 -				    0, QLCNIC_MAC_ADD);
 +	qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
 +	qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
 +
 +	if (qlcnic_83xx_pf_check(adapter))
 +		qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
 +					    0, QLCNIC_MAC_ADD);
  	return 0;
  }
  
@@@ -1311,37 -1242,32 +1318,37 @@@ static int qlcnic_sriov_pf_add_guest_vl
  					  struct qlcnic_vf_info *vf,
  					  struct qlcnic_cmd_args *cmd)
  {
 -	struct qlcnic_vport *vp = vf->vp;
 +	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
  	int err = -EIO;
 +	u16 vlan;
  
 -	if (vp->vlan)
 +	if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
  		return err;
  
 +	vlan = cmd->req.arg[1] >> 16;
 +
  	if (!vf->rx_ctx_id) {
 -		vp->vlan = cmd->req.arg[1] >> 16;
 +		qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  		return 0;
  	}
  
 -	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
 -					  0, QLCNIC_MAC_DEL);
 -	if (err)
 -		return err;
 +	if (qlcnic_83xx_pf_check(adapter)) {
 +		err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
 +						  QLCNIC_MAC_DEL);
 +		if (err)
 +			return err;
 +	}
  
 -	vp->vlan = cmd->req.arg[1] >> 16;
 -	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
 -					  vp->vlan, QLCNIC_MAC_ADD);
 +	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
  
  	if (err) {
 -		qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
 -					    0, QLCNIC_MAC_ADD);
 -		vp->vlan = 0;
 +		if (qlcnic_83xx_pf_check(adapter))
 +			qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
 +						    QLCNIC_MAC_ADD);
 +		return err;
  	}
  
 +	qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  	return err;
  }
  
@@@ -1364,7 -1290,7 +1371,7 @@@ static int qlcnic_sriov_pf_cfg_guest_vl
  	if (op)
  		err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
  	else
 -		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
 +		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
  
  	cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
  	return err;
@@@ -1374,6 -1300,8 +1381,6 @@@ static const int qlcnic_pf_passthru_sup
  	QLCNIC_CMD_GET_STATISTICS,
  	QLCNIC_CMD_GET_PORT_CONFIG,
  	QLCNIC_CMD_GET_LINK_STATUS,
 -	QLCNIC_CMD_DCB_QUERY_CAP,
 -	QLCNIC_CMD_DCB_QUERY_PARAM,
  	QLCNIC_CMD_INIT_NIC_FUNC,
  	QLCNIC_CMD_STOP_NIC_FUNC,
  };
@@@ -1669,8 -1597,7 +1676,8 @@@ void qlcnic_sriov_pf_handle_flr(struct 
  	}
  
  	if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
 -		vp->vlan = 0;
 +		memset(vf->sriov_vlans, 0,
 +		       sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
  
  	qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  	netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@@ -1840,22 -1767,20 +1847,22 @@@ int qlcnic_sriov_set_vf_vlan(struct net
  		return -EOPNOTSUPP;
  	}
  
 +	memset(vf_info->sriov_vlans, 0,
 +	       sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
 +
  	switch (vlan) {
  	case 4095:
 -		vp->vlan = 0;
  		vp->vlan_mode = QLC_GUEST_VLAN_MODE;
  		break;
  	case 0:
  		vp->vlan_mode = QLC_NO_VLAN_MODE;
 -		vp->vlan = 0;
  		vp->qos = 0;
  		break;
  	default:
  		vp->vlan_mode = QLC_PVID_MODE;
 -		vp->vlan = vlan;
 +		qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
  		vp->qos = qos;
 +		vp->pvid = vlan;
  	}
  
  	netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@@ -1870,7 -1795,7 +1877,7 @@@ static __u32 qlcnic_sriov_get_vf_vlan(s
  
  	switch (vp->vlan_mode) {
  	case QLC_PVID_MODE:
 -		vlan = vp->vlan;
 +		vlan = vp->pvid;
  		break;
  	case QLC_GUEST_VLAN_MODE:
  		vlan = MAX_VLAN_ID;
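
The qlcnic_sriov_pf.c hunks above generalize the single vp->vlan into a per-VF sriov_vlans[] array sized by num_allowed_vlans, with add/del helpers and a check for any active VLAN. A hypothetical miniature of that bookkeeping (fixed quota, simplified types; the real qlcnic_sriov_add_vlan_id/del_vlan_id helpers live elsewhere in the driver):

#include <stdio.h>
#include <string.h>

#define NUM_ALLOWED_VLANS 4

struct vf_info {
	unsigned short sriov_vlans[NUM_ALLOWED_VLANS];
	int num_vlan;
};

static int add_vlan_id(struct vf_info *vf, unsigned short vlan)
{
	int i;

	for (i = 0; i < NUM_ALLOWED_VLANS; i++) {
		if (!vf->sriov_vlans[i]) {
			vf->sriov_vlans[i] = vlan;
			vf->num_vlan++;
			return 0;
		}
	}
	return -1;		/* guest VLAN quota exhausted */
}

static void del_vlan_id(struct vf_info *vf, unsigned short vlan)
{
	int i;

	for (i = 0; i < NUM_ALLOWED_VLANS; i++) {
		if (vf->sriov_vlans[i] == vlan) {
			vf->sriov_vlans[i] = 0;
			vf->num_vlan--;
			return;
		}
	}
}

int main(void)
{
	struct vf_info vf;

	memset(&vf, 0, sizeof(vf));
	add_vlan_id(&vf, 100);
	add_vlan_id(&vf, 200);
	del_vlan_id(&vf, 100);
	printf("active guest VLANs: %d\n", vf.num_vlan);
	return 0;
}
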
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2161410,797b56a..b8e3a4c
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -64,7 -64,7 +64,7 @@@ static int debug = -1
  module_param(debug, int, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  
 -int phyaddr = -1;
 +static int phyaddr = -1;
  module_param(phyaddr, int, S_IRUGO);
  MODULE_PARM_DESC(phyaddr, "Physical device address");
  
@@@ -622,17 -622,15 +622,15 @@@ static int stmmac_init_ptp(struct stmma
  	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
  		return -EOPNOTSUPP;
  
- 	if (netif_msg_hw(priv)) {
- 		if (priv->dma_cap.time_stamp) {
- 			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
- 			priv->adv_ts = 0;
- 		}
- 		if (priv->dma_cap.atime_stamp && priv->extend_desc) {
- 			pr_debug
- 			    ("IEEE 1588-2008 Advanced Time Stamp supported\n");
- 			priv->adv_ts = 1;
- 		}
- 	}
+ 	priv->adv_ts = 0;
+ 	if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ 		priv->adv_ts = 1;
+ 
+ 	if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+ 		pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+ 
+ 	if (netif_msg_hw(priv) && priv->adv_ts)
+ 		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
  
  	priv->hw->ptp = &stmmac_ptp;
  	priv->hwts_tx_en = 0;
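
The stmmac hunk fixes an initialization-order bug: priv->adv_ts was assigned only inside the netif_msg_hw() block, so advanced timestamping support silently depended on the debug message level. The rework detects the capability first and logs second. A runnable sketch of the corrected shape (stub types and names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct priv { bool has_atime_stamp, extend_desc, msg_hw; int adv_ts; };

/* Capability detection first, logging second: the assignment no
 * longer hides inside the message-level check. */
static void init_ts(struct priv *p)
{
	p->adv_ts = 0;
	if (p->has_atime_stamp && p->extend_desc)
		p->adv_ts = 1;

	if (p->msg_hw && p->adv_ts)
		puts("IEEE 1588-2008 Advanced Time Stamp supported");
}

int main(void)
{
	struct priv p = { .has_atime_stamp = true, .extend_desc = true,
			  .msg_hw = false };

	init_ts(&p);
	printf("adv_ts=%d\n", p.adv_ts);	/* 1 even with logging off */
	return 0;
}
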
diff --combined drivers/net/ethernet/ti/cpsw.c
index 243fffb,5330fd2..e8bb77d
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@@ -582,7 -582,7 +582,7 @@@ static void cpsw_intr_disable(struct cp
  	return;
  }
  
 -void cpsw_tx_handler(void *token, int len, int status)
 +static void cpsw_tx_handler(void *token, int len, int status)
  {
  	struct sk_buff		*skb = token;
  	struct net_device	*ndev = skb->dev;
@@@ -599,7 -599,7 +599,7 @@@
  	dev_kfree_skb_any(skb);
  }
  
 -void cpsw_rx_handler(void *token, int len, int status)
 +static void cpsw_rx_handler(void *token, int len, int status)
  {
  	struct sk_buff		*skb = token;
  	struct sk_buff		*new_skb;
@@@ -740,6 -740,8 +740,8 @@@ static void _cpsw_adjust_link(struct cp
  		/* set speed_in input in case RMII mode is used in 100Mbps */
  		if (phy->speed == 100)
  			mac_control |= BIT(15);
+ 		else if (phy->speed == 10)
+ 			mac_control |= BIT(18); /* In Band mode */
  
  		*link = true;
  	} else {
@@@ -1329,7 -1331,7 +1331,7 @@@ static void cpsw_hwtstamp_v2(struct cps
  	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
  }
  
 -static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
  {
  	struct cpsw_priv *priv = netdev_priv(dev);
  	struct cpts *cpts = priv->cpts;
@@@ -1390,24 -1392,6 +1392,24 @@@
  	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  }
  
 +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 +{
 +	struct cpsw_priv *priv = netdev_priv(dev);
 +	struct cpts *cpts = priv->cpts;
 +	struct hwtstamp_config cfg;
 +
 +	if (priv->version != CPSW_VERSION_1 &&
 +	    priv->version != CPSW_VERSION_2)
 +		return -EOPNOTSUPP;
 +
 +	cfg.flags = 0;
 +	cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 +	cfg.rx_filter = (cpts->rx_enable ?
 +			 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
 +
 +	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 +}
 +
  #endif /*CONFIG_TI_CPTS*/
  
  static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@@ -1422,9 -1406,7 +1424,9 @@@
  	switch (cmd) {
  #ifdef CONFIG_TI_CPTS
  	case SIOCSHWTSTAMP:
 -		return cpsw_hwtstamp_ioctl(dev, req);
 +		return cpsw_hwtstamp_set(dev, req);
 +	case SIOCGHWTSTAMP:
 +		return cpsw_hwtstamp_get(dev, req);
  #endif
  	case SIOCGMIIPHY:
  		data->phy_id = priv->slaves[slave_no].phy->addr;
@@@ -2126,7 -2108,7 +2128,7 @@@ static int cpsw_probe(struct platform_d
  	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
  		for (i = res->start; i <= res->end; i++) {
  			if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- 					     dev_name(priv->dev), priv)) {
+ 					     dev_name(&pdev->dev), priv)) {
  				dev_err(priv->dev, "error attaching irq\n");
  				goto clean_ale_ret;
  			}
@@@ -2155,8 -2137,8 +2157,8 @@@
  			  data->cpts_clock_mult, data->cpts_clock_shift))
  		dev_err(priv->dev, "error registering cpts device\n");
  
 -	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
 -		    ss_res->start, ndev->irq);
 +	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
 +		    &ss_res->start, ndev->irq);
  
  	if (priv->data.dual_emac) {
  		ret = cpsw_probe_dual_emac(pdev, priv);
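
The cpsw hunks split the old hwtstamp ioctl into set/get halves and wire up SIOCGHWTSTAMP, so userspace can query the current timestamping configuration without changing it. A small userspace program exercising that path (requires kernel headers that define SIOCGHWTSTAMP; on kernels or drivers without the handler the ioctl simply fails):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Read-only query; SIOCSHWTSTAMP remains the setter */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
		perror("SIOCGHWTSTAMP");
		close(fd);
		return 1;
	}

	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}

Run as, e.g., ./hwtstamp eth0 on an interface driven by cpsw.
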
diff --combined drivers/net/hyperv/netvsc_drv.c
index f80bd0c,71baeb3..7756118
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@@ -11,7 -11,8 +11,7 @@@
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 - * Place - Suite 330, Boston, MA 02111-1307 USA.
 + * this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * Authors:
   *   Haiyang Zhang <haiyangz at microsoft.com>
@@@ -260,9 -261,7 +260,7 @@@ int netvsc_recv_callback(struct hv_devi
  	struct sk_buff *skb;
  
  	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- 	if (!net) {
- 		netdev_err(net, "got receive callback but net device"
- 			" not initialized yet\n");
+ 	if (!net || net->reg_state != NETREG_REGISTERED) {
  		packet->status = NVSP_STAT_FAIL;
  		return 0;
  	}
@@@ -434,19 -433,11 +432,11 @@@ static int netvsc_probe(struct hv_devic
  	SET_ETHTOOL_OPS(net, &ethtool_ops);
  	SET_NETDEV_DEV(net, &dev->device);
  
- 	ret = register_netdev(net);
- 	if (ret != 0) {
- 		pr_err("Unable to register netdev.\n");
- 		free_netdev(net);
- 		goto out;
- 	}
- 
  	/* Notify the netvsc driver of the new device */
  	device_info.ring_size = ring_size;
  	ret = rndis_filter_device_add(dev, &device_info);
  	if (ret != 0) {
  		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- 		unregister_netdev(net);
  		free_netdev(net);
  		hv_set_drvdata(dev, NULL);
  		return ret;
@@@ -455,7 -446,13 +445,13 @@@
  
  	netif_carrier_on(net);
  
- out:
+ 	ret = register_netdev(net);
+ 	if (ret != 0) {
+ 		pr_err("Unable to register netdev.\n");
+ 		rndis_filter_device_remove(dev);
+ 		free_netdev(net);
+ 	}
+ 
  	return ret;
  }
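
The netvsc_drv.c change reorders probe so register_netdev() runs only after rndis_filter_device_add() has brought the device up, closing the window where a receive callback could fire on a half-initialized netdev; on registration failure the backend is torn down again. A hedged sketch of the ordering (stub functions, not the real netvsc API):

#include <stdio.h>

struct device { int backend_up; };

static int backend_add(struct device *d)     { d->backend_up = 1; return 0; }
static void backend_remove(struct device *d) { d->backend_up = 0; }
static int publish_netdev(struct device *d)  { return d->backend_up ? 0 : -1; }

/* Probe in the order the hunks establish: bring the backend up
 * first, publish the net device last, unwind in reverse on error. */
static int probe(struct device *d)
{
	int ret = backend_add(d);

	if (ret)
		return ret;

	ret = publish_netdev(d);
	if (ret)
		backend_remove(d);
	return ret;
}

int main(void)
{
	struct device d = { 0 };

	printf("probe: %d\n", probe(&d));
	return 0;
}
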
  
diff --combined drivers/net/macvlan.c
index 9419836,60406b0..09ababe
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -120,7 -120,7 +120,7 @@@ static int macvlan_broadcast_one(struc
  	struct net_device *dev = vlan->dev;
  
  	if (local)
 -		return vlan->forward(dev, skb);
 +		return dev_forward_skb(dev, skb);
  
  	skb->dev = dev;
  	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@@ -128,7 -128,7 +128,7 @@@
  	else
  		skb->pkt_type = PACKET_MULTICAST;
  
 -	return vlan->receive(skb);
 +	return netif_rx(skb);
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -251,7 -251,7 +251,7 @@@ static rx_handler_result_t macvlan_hand
  	skb->dev = dev;
  	skb->pkt_type = PACKET_HOST;
  
 -	ret = vlan->receive(skb);
 +	ret = netif_rx(skb);
  
  out:
  	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@@ -290,8 -290,8 +290,8 @@@ xmit_world
  	return dev_queue_xmit(skb);
  }
  
 -netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 -			       struct net_device *dev)
 +static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 +				      struct net_device *dev)
  {
  	unsigned int len = skb->len;
  	int ret;
@@@ -305,7 -305,7 +305,7 @@@
  	}
  
  	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 -		struct macvlan_pcpu_stats *pcpu_stats;
 +		struct vlan_pcpu_stats *pcpu_stats;
  
  		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
  		u64_stats_update_begin(&pcpu_stats->syncp);
@@@ -317,6 -317,7 +317,6 @@@
  	}
  	return ret;
  }
 -EXPORT_SYMBOL_GPL(macvlan_start_xmit);
  
  static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
  			       unsigned short type, const void *daddr,
@@@ -545,12 -546,12 +545,12 @@@ static int macvlan_init(struct net_devi
  
  	macvlan_set_lockdep_class(dev);
  
 -	vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
 +	vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
  	if (!vlan->pcpu_stats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
 -		struct macvlan_pcpu_stats *mvlstats;
 +		struct vlan_pcpu_stats *mvlstats;
  		mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
  		u64_stats_init(&mvlstats->syncp);
  	}
@@@ -576,7 -577,7 +576,7 @@@ static struct rtnl_link_stats64 *macvla
  	struct macvlan_dev *vlan = netdev_priv(dev);
  
  	if (vlan->pcpu_stats) {
 -		struct macvlan_pcpu_stats *p;
 +		struct vlan_pcpu_stats *p;
  		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
  		u32 rx_errors = 0, tx_dropped = 0;
  		unsigned int start;
@@@ -689,8 -690,19 +689,19 @@@ static netdev_features_t macvlan_fix_fe
  					      netdev_features_t features)
  {
  	struct macvlan_dev *vlan = netdev_priv(dev);
+ 	netdev_features_t mask;
  
- 	return features & (vlan->set_features | ~MACVLAN_FEATURES);
+ 	features |= NETIF_F_ALL_FOR_ALL;
+ 	features &= (vlan->set_features | ~MACVLAN_FEATURES);
+ 	mask = features;
+ 
+ 	features = netdev_increment_features(vlan->lowerdev->features,
+ 					     features,
+ 					     mask);
+ 	if (!vlan->fwd_priv)
+ 		features |= NETIF_F_LLTX;
+ 
+ 	return features;
  }
  
  static const struct ethtool_ops macvlan_ethtool_ops = {
@@@ -802,7 -814,10 +813,7 @@@ static int macvlan_validate(struct nlat
  }
  
  int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 -			   struct nlattr *tb[], struct nlattr *data[],
 -			   int (*receive)(struct sk_buff *skb),
 -			   int (*forward)(struct net_device *dev,
 -					  struct sk_buff *skb))
 +			   struct nlattr *tb[], struct nlattr *data[])
  {
  	struct macvlan_dev *vlan = netdev_priv(dev);
  	struct macvlan_port *port;
@@@ -816,11 -831,13 +827,11 @@@
  	if (lowerdev == NULL)
  		return -ENODEV;
  
 -	/* When creating macvlans on top of other macvlans - use
 +	/* When creating macvlans or macvtaps on top of other macvlans - use
  	 * the real device as the lowerdev.
  	 */
 -	if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
 -		struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
 -		lowerdev = lowervlan->lowerdev;
 -	}
 +	if (netif_is_macvlan(lowerdev))
 +		lowerdev = macvlan_dev_real_dev(lowerdev);
  
  	if (!tb[IFLA_MTU])
  		dev->mtu = lowerdev->mtu;
@@@ -844,6 -861,8 +855,6 @@@
  	vlan->lowerdev = lowerdev;
  	vlan->dev      = dev;
  	vlan->port     = port;
 -	vlan->receive  = receive;
 -	vlan->forward  = forward;
  	vlan->set_features = MACVLAN_FEATURES;
  
  	vlan->mode     = MACVLAN_MODE_VEPA;
@@@ -888,7 -907,9 +899,7 @@@ EXPORT_SYMBOL_GPL(macvlan_common_newlin
  static int macvlan_newlink(struct net *src_net, struct net_device *dev,
  			   struct nlattr *tb[], struct nlattr *data[])
  {
 -	return macvlan_common_newlink(src_net, dev, tb, data,
 -				      netif_rx,
 -				      dev_forward_skb);
 +	return macvlan_common_newlink(src_net, dev, tb, data);
  }
  
  void macvlan_dellink(struct net_device *dev, struct list_head *head)
@@@ -1009,9 -1030,8 +1020,8 @@@ static int macvlan_device_event(struct 
  		break;
  	case NETDEV_FEAT_CHANGE:
  		list_for_each_entry(vlan, &port->vlans, list) {
- 			vlan->dev->features = dev->features & MACVLAN_FEATURES;
  			vlan->dev->gso_max_size = dev->gso_max_size;
- 			netdev_features_change(vlan->dev);
+ 			netdev_update_features(vlan->dev);
  		}
  		break;
  	case NETDEV_UNREGISTER:
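
In macvlan.c, fix_features() now merges with the lower device via netdev_increment_features() (so NETIF_F_ALL_FOR_ALL flags propagate through the stack) and grants NETIF_F_LLTX when no forwarding offload is active, while NETDEV_FEAT_CHANGE re-runs netdev_update_features() instead of copying feature bits by hand. A toy model of the merge, with made-up flag values standing in for netdev_features_t bits:

#include <stdio.h>

#define F_SG		0x01u
#define F_HW_CSUM	0x02u
#define F_LLTX		0x04u
#define MACVLAN_FEATURES (F_SG | F_HW_CSUM)

static unsigned int fix_features(unsigned int lower, unsigned int requested,
				 unsigned int set_features, int fwd_priv)
{
	unsigned int features = requested & (set_features | ~MACVLAN_FEATURES);

	features &= lower;	/* crude stand-in for the increment merge */
	if (!fwd_priv)
		features |= F_LLTX;
	return features;
}

int main(void)
{
	unsigned int f = fix_features(F_SG | F_HW_CSUM, F_SG | F_HW_CSUM,
				      F_SG, 0);

	printf("features=%#x\n", f);	/* SG only, plus LLTX */
	return 0;
}
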
diff --combined drivers/net/phy/phy.c
index 19da5ab6,98434b8..76e8936
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@@ -1,4 -1,7 +1,4 @@@
 -/*
 - * drivers/net/phy/phy.c
 - *
 - * Framework for configuring and reading PHY devices
 +/* Framework for configuring and reading PHY devices
   * Based on code in sungem_phy.c and gianfar_phy.c
   *
   * Author: Andy Fleming
@@@ -33,11 -36,11 +33,11 @@@
  #include <linux/timer.h>
  #include <linux/workqueue.h>
  #include <linux/mdio.h>
 -
 +#include <linux/io.h>
 +#include <linux/uaccess.h>
  #include <linux/atomic.h>
 -#include <asm/io.h>
 +
  #include <asm/irq.h>
 -#include <asm/uaccess.h>
  
  /**
   * phy_print_status - Convenience function to print out the current phy status
@@@ -45,14 -48,13 +45,14 @@@
   */
  void phy_print_status(struct phy_device *phydev)
  {
 -	if (phydev->link)
 +	if (phydev->link) {
  		pr_info("%s - Link is Up - %d/%s\n",
  			dev_name(&phydev->dev),
  			phydev->speed,
  			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 -	else
 +	} else	{
  		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
 +	}
  }
  EXPORT_SYMBOL(phy_print_status);
  
@@@ -67,10 -69,12 +67,10 @@@
   */
  static int phy_clear_interrupt(struct phy_device *phydev)
  {
 -	int err = 0;
 -
  	if (phydev->drv->ack_interrupt)
 -		err = phydev->drv->ack_interrupt(phydev);
 +		return phydev->drv->ack_interrupt(phydev);
  
 -	return err;
 +	return 0;
  }
  
  /**
@@@ -82,11 -86,13 +82,11 @@@
   */
  static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  {
 -	int err = 0;
 -
  	phydev->interrupts = interrupts;
  	if (phydev->drv->config_intr)
 -		err = phydev->drv->config_intr(phydev);
 +		return phydev->drv->config_intr(phydev);
  
 -	return err;
 +	return 0;
  }
  
  
@@@ -100,14 -106,15 +100,14 @@@
   */
  static inline int phy_aneg_done(struct phy_device *phydev)
  {
 -	int retval;
 -
 -	retval = phy_read(phydev, MII_BMSR);
 +	int retval = phy_read(phydev, MII_BMSR);
  
  	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
  }
  
  /* A structure for mapping a particular speed and duplex
 - * combination to a particular SUPPORTED and ADVERTISED value */
 + * combination to a particular SUPPORTED and ADVERTISED value
 + */
  struct phy_setting {
  	int speed;
  	int duplex;
@@@ -170,7 -177,8 +170,7 @@@ static inline int phy_find_setting(int 
  	int idx = 0;
  
  	while (idx < ARRAY_SIZE(settings) &&
 -			(settings[idx].speed != speed ||
 -			settings[idx].duplex != duplex))
 +	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
  		idx++;
  
  	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@@ -237,7 -245,8 +237,7 @@@ int phy_ethtool_sset(struct phy_device 
  	if (cmd->phy_address != phydev->addr)
  		return -EINVAL;
  
 -	/* We make sure that we don't pass unsupported
 -	 * values in to the PHY */
 +	/* We make sure that we don't pass unsupported values in to the PHY */
  	cmd->advertising &= phydev->supported;
  
  	/* Verify the settings we care about. */
@@@ -280,7 -289,6 +280,7 @@@ int phy_ethtool_gset(struct phy_device 
  	cmd->supported = phydev->supported;
  
  	cmd->advertising = phydev->advertising;
 +	cmd->lp_advertising = phydev->lp_advertising;
  
  	ethtool_cmd_speed_set(cmd, phydev->speed);
  	cmd->duplex = phydev->duplex;
@@@ -304,7 -312,8 +304,7 @@@ EXPORT_SYMBOL(phy_ethtool_gset)
   * PHYCONTROL layer.  It changes registers without regard to
   * current state.  Use at own risk.
   */
 -int phy_mii_ioctl(struct phy_device *phydev,
 -		struct ifreq *ifr, int cmd)
 +int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
  {
  	struct mii_ioctl_data *mii_data = if_mii(ifr);
  	u16 val = mii_data->val_in;
@@@ -317,24 -326,25 +317,24 @@@
  	case SIOCGMIIREG:
  		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
  						 mii_data->reg_num);
 -		break;
 +		return 0;
  
  	case SIOCSMIIREG:
  		if (mii_data->phy_id == phydev->addr) {
 -			switch(mii_data->reg_num) {
 +			switch (mii_data->reg_num) {
  			case MII_BMCR:
 -				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
 +				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
  					phydev->autoneg = AUTONEG_DISABLE;
  				else
  					phydev->autoneg = AUTONEG_ENABLE;
 -				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
 +				if (!phydev->autoneg && (val & BMCR_FULLDPLX))
  					phydev->duplex = DUPLEX_FULL;
  				else
  					phydev->duplex = DUPLEX_HALF;
 -				if ((!phydev->autoneg) &&
 -						(val & BMCR_SPEED1000))
 +				if (!phydev->autoneg && (val & BMCR_SPEED1000))
  					phydev->speed = SPEED_1000;
 -				else if ((!phydev->autoneg) &&
 -						(val & BMCR_SPEED100))
 +				else if (!phydev->autoneg &&
 +					 (val & BMCR_SPEED100))
  					phydev->speed = SPEED_100;
  				break;
  			case MII_ADVERTISE:
@@@ -350,9 -360,12 +350,9 @@@
  			      mii_data->reg_num, val);
  
  		if (mii_data->reg_num == MII_BMCR &&
 -		    val & BMCR_RESET &&
 -		    phydev->drv->config_init) {
 -			phy_scan_fixups(phydev);
 -			phydev->drv->config_init(phydev);
 -		}
 -		break;
 +		    val & BMCR_RESET)
 +			return phy_init_hw(phydev);
 +		return 0;
  
  	case SIOCSHWTSTAMP:
  		if (phydev->drv->hwtstamp)
@@@ -362,6 -375,8 +362,6 @@@
  	default:
  		return -EOPNOTSUPP;
  	}
 -
 -	return 0;
  }
  EXPORT_SYMBOL(phy_mii_ioctl);
  
@@@ -384,6 -399,7 +384,6 @@@ int phy_start_aneg(struct phy_device *p
  		phy_sanitize_settings(phydev);
  
  	err = phydev->drv->config_aneg(phydev);
 -
  	if (err < 0)
  		goto out_unlock;
  
@@@ -403,18 -419,25 +403,18 @@@ out_unlock
  }
  EXPORT_SYMBOL(phy_start_aneg);
  
  /**
   * phy_start_machine - start PHY state machine tracking
   * @phydev: the phy_device struct
 - * @handler: callback function for state change notifications
   *
   * Description: The PHY infrastructure can run a state machine
   *   which tracks whether the PHY is starting up, negotiating,
   *   etc.  This function starts the timer which tracks the state
 - *   of the PHY.  If you want to be notified when the state changes,
 - *   pass in the callback @handler, otherwise, pass NULL.  If you
 - *   want to maintain your own state machine, do not call this
 - *   function.
 + *   of the PHY.  If you want to maintain your own state machine,
 + *   do not call this function.
   */
 -void phy_start_machine(struct phy_device *phydev,
 -		void (*handler)(struct net_device *))
 +void phy_start_machine(struct phy_device *phydev)
  {
 -	phydev->adjust_state = handler;
 -
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
  }
  
@@@ -434,6 -457,8 +434,6 @@@ void phy_stop_machine(struct phy_devic
  	if (phydev->state > PHY_UP)
  		phydev->state = PHY_UP;
  	mutex_unlock(&phydev->lock);
 -
 -	phydev->adjust_state = NULL;
  }
  
  /**
@@@ -470,8 -495,7 +470,8 @@@ static irqreturn_t phy_interrupt(int ir
  	/* The MDIO bus is not allowed to be written in interrupt
  	 * context, so we need to disable the irq here.  A work
  	 * queue will write the PHY to disable and clear the
 -	 * interrupt, and then reenable the irq line. */
 +	 * interrupt, and then reenable the irq line.
 +	 */
  	disable_irq_nosync(irq);
  	atomic_inc(&phydev->irq_disable);
  
@@@ -486,12 -510,16 +486,12 @@@
   */
  static int phy_enable_interrupts(struct phy_device *phydev)
  {
 -	int err;
 -
 -	err = phy_clear_interrupt(phydev);
 +	int err = phy_clear_interrupt(phydev);
  
  	if (err < 0)
  		return err;
  
 -	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 -
 -	return err;
 +	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
  }
  
  /**
@@@ -504,11 -532,13 +504,11 @@@ static int phy_disable_interrupts(struc
  
  	/* Disable PHY interrupts */
  	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
 -
  	if (err)
  		goto phy_err;
  
  	/* Clear the interrupt */
  	err = phy_clear_interrupt(phydev);
 -
  	if (err)
  		goto phy_err;
  
@@@ -532,18 -562,20 +532,16 @@@ phy_err
   */
  int phy_start_interrupts(struct phy_device *phydev)
  {
 -	int err = 0;
 -
  	atomic_set(&phydev->irq_disable, 0);
- 	if (request_irq(phydev->irq, phy_interrupt,
- 				IRQF_SHARED,
- 				"phy_interrupt",
- 				phydev) < 0) {
+ 	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+ 			phydev) < 0) {
  		pr_warn("%s: Can't get IRQ %d (PHY)\n",
  			phydev->bus->name, phydev->irq);
  		phydev->irq = PHY_POLL;
  		return 0;
  	}
  
 -	err = phy_enable_interrupts(phydev);
 -
 -	return err;
 +	return phy_enable_interrupts(phydev);
  }
  EXPORT_SYMBOL(phy_start_interrupts);
  
@@@ -553,20 -585,24 +551,20 @@@
   */
  int phy_stop_interrupts(struct phy_device *phydev)
  {
 -	int err;
 -
 -	err = phy_disable_interrupts(phydev);
 +	int err = phy_disable_interrupts(phydev);
  
  	if (err)
  		phy_error(phydev);
  
  	free_irq(phydev->irq, phydev);
  
 -	/*
 -	 * Cannot call flush_scheduled_work() here as desired because
 +	/* Cannot call flush_scheduled_work() here as desired because
  	 * of rtnl_lock(), but we do not really care about what would
  	 * be done, except from enable_irq(), so cancel any work
  	 * possibly pending and take care of the matter below.
  	 */
  	cancel_work_sync(&phydev->phy_queue);
 -	/*
 -	 * If work indeed has been cancelled, disable_irq() will have
 +	/* If work indeed has been cancelled, disable_irq() will have
  	 * been left unbalanced from phy_interrupt() and enable_irq()
  	 * has to be called so that other devices on the line work.
  	 */
@@@ -577,12 -613,14 +575,12 @@@
  }
  EXPORT_SYMBOL(phy_stop_interrupts);
  
 -
  /**
   * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
   * @work: work_struct that describes the work to be done
   */
  void phy_change(struct work_struct *work)
  {
 -	int err;
  	struct phy_device *phydev =
  		container_of(work, struct phy_device, phy_queue);
  
@@@ -590,7 -628,9 +588,7 @@@
  	    !phydev->drv->did_interrupt(phydev))
  		goto ignore;
  
 -	err = phy_disable_interrupts(phydev);
 -
 -	if (err)
 +	if (phy_disable_interrupts(phydev))
  		goto phy_err;
  
  	mutex_lock(&phydev->lock);
@@@ -602,13 -642,16 +600,13 @@@
  	enable_irq(phydev->irq);
  
  	/* Reenable interrupts */
 -	if (PHY_HALTED != phydev->state)
 -		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 -
 -	if (err)
 +	if (PHY_HALTED != phydev->state &&
 +	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
  		goto irq_enable_err;
  
  	/* reschedule state queue work to run as soon as possible */
  	cancel_delayed_work_sync(&phydev->state_queue);
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 -
  	return;
  
  ignore:
@@@ -647,12 -690,13 +645,12 @@@ void phy_stop(struct phy_device *phydev
  out_unlock:
  	mutex_unlock(&phydev->lock);
  
 -	/*
 -	 * Cannot call flush_scheduled_work() here as desired because
 +	/* Cannot call flush_scheduled_work() here as desired because
  	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
  	 * will not reenable interrupts.
  	 */
  }
 -
 +EXPORT_SYMBOL(phy_stop);
  
  /**
   * phy_start - start or restart a PHY device
@@@ -669,19 -713,20 +667,19 @@@ void phy_start(struct phy_device *phyde
  	mutex_lock(&phydev->lock);
  
  	switch (phydev->state) {
 -		case PHY_STARTING:
 -			phydev->state = PHY_PENDING;
 -			break;
 -		case PHY_READY:
 -			phydev->state = PHY_UP;
 -			break;
 -		case PHY_HALTED:
 -			phydev->state = PHY_RESUMING;
 -		default:
 -			break;
 +	case PHY_STARTING:
 +		phydev->state = PHY_PENDING;
 +		break;
 +	case PHY_READY:
 +		phydev->state = PHY_UP;
 +		break;
 +	case PHY_HALTED:
 +		phydev->state = PHY_RESUMING;
 +	default:
 +		break;
  	}
  	mutex_unlock(&phydev->lock);
  }
 -EXPORT_SYMBOL(phy_stop);
  EXPORT_SYMBOL(phy_start);
  
  /**
@@@ -693,132 -738,160 +691,132 @@@ void phy_state_machine(struct work_stru
  	struct delayed_work *dwork = to_delayed_work(work);
  	struct phy_device *phydev =
  			container_of(dwork, struct phy_device, state_queue);
 -	int needs_aneg = 0;
 +	int needs_aneg = 0, do_suspend = 0;
  	int err = 0;
  
  	mutex_lock(&phydev->lock);
  
 -	if (phydev->adjust_state)
 -		phydev->adjust_state(phydev->attached_dev);
 +	switch (phydev->state) {
 +	case PHY_DOWN:
 +	case PHY_STARTING:
 +	case PHY_READY:
 +	case PHY_PENDING:
 +		break;
 +	case PHY_UP:
 +		needs_aneg = 1;
  
 -	switch(phydev->state) {
 -		case PHY_DOWN:
 -		case PHY_STARTING:
 -		case PHY_READY:
 -		case PHY_PENDING:
 -			break;
 -		case PHY_UP:
 -			needs_aneg = 1;
 +		phydev->link_timeout = PHY_AN_TIMEOUT;
  
 -			phydev->link_timeout = PHY_AN_TIMEOUT;
 +		break;
 +	case PHY_AN:
 +		err = phy_read_status(phydev);
 +		if (err < 0)
 +			break;
  
 +		/* If the link is down, give up on negotiation for now */
 +		if (!phydev->link) {
 +			phydev->state = PHY_NOLINK;
 +			netif_carrier_off(phydev->attached_dev);
 +			phydev->adjust_link(phydev->attached_dev);
  			break;
 -		case PHY_AN:
 -			err = phy_read_status(phydev);
 +		}
  
 -			if (err < 0)
 -				break;
 +		/* Check if negotiation is done.  Break if there's an error */
 +		err = phy_aneg_done(phydev);
 +		if (err < 0)
 +			break;
  
 -			/* If the link is down, give up on
 -			 * negotiation for now */
 -			if (!phydev->link) {
 -				phydev->state = PHY_NOLINK;
 -				netif_carrier_off(phydev->attached_dev);
 -				phydev->adjust_link(phydev->attached_dev);
 -				break;
 -			}
 +		/* If AN is done, we're running */
 +		if (err > 0) {
 +			phydev->state = PHY_RUNNING;
 +			netif_carrier_on(phydev->attached_dev);
 +			phydev->adjust_link(phydev->attached_dev);
  
 -			/* Check if negotiation is done.  Break
 -			 * if there's an error */
 -			err = phy_aneg_done(phydev);
 -			if (err < 0)
 +		} else if (0 == phydev->link_timeout--) {
 +			needs_aneg = 1;
 +			/* If we have the magic_aneg bit, we try again */
 +			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
  				break;
 -
 -			/* If AN is done, we're running */
 -			if (err > 0) {
 -				phydev->state = PHY_RUNNING;
 -				netif_carrier_on(phydev->attached_dev);
 -				phydev->adjust_link(phydev->attached_dev);
 -
 -			} else if (0 == phydev->link_timeout--) {
 -				needs_aneg = 1;
 -				/* If we have the magic_aneg bit,
 -				 * we try again */
 -				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
 -					break;
 -			}
 +		}
 +		break;
 +	case PHY_NOLINK:
 +		err = phy_read_status(phydev);
 +		if (err)
  			break;
 -		case PHY_NOLINK:
 -			err = phy_read_status(phydev);
 -
 -			if (err)
 -				break;
  
 -			if (phydev->link) {
 -				phydev->state = PHY_RUNNING;
 -				netif_carrier_on(phydev->attached_dev);
 -				phydev->adjust_link(phydev->attached_dev);
 -			}
 +		if (phydev->link) {
 +			phydev->state = PHY_RUNNING;
 +			netif_carrier_on(phydev->attached_dev);
 +			phydev->adjust_link(phydev->attached_dev);
 +		}
 +		break;
 +	case PHY_FORCING:
 +		err = genphy_update_link(phydev);
 +		if (err)
  			break;
 -		case PHY_FORCING:
 -			err = genphy_update_link(phydev);
 -
 -			if (err)
 -				break;
  
 -			if (phydev->link) {
 -				phydev->state = PHY_RUNNING;
 -				netif_carrier_on(phydev->attached_dev);
 -			} else {
 -				if (0 == phydev->link_timeout--)
 -					needs_aneg = 1;
 -			}
 +		if (phydev->link) {
 +			phydev->state = PHY_RUNNING;
 +			netif_carrier_on(phydev->attached_dev);
 +		} else {
 +			if (0 == phydev->link_timeout--)
 +				needs_aneg = 1;
 +		}
  
 -			phydev->adjust_link(phydev->attached_dev);
 -			break;
 -		case PHY_RUNNING:
 -			/* Only register a CHANGE if we are
 -			 * polling or ignoring interrupts
 -			 */
 -			if (!phy_interrupt_is_valid(phydev))
 -				phydev->state = PHY_CHANGELINK;
 +		phydev->adjust_link(phydev->attached_dev);
 +		break;
 +	case PHY_RUNNING:
 +		/* Only register a CHANGE if we are
 +		 * polling or ignoring interrupts
 +		 */
 +		if (!phy_interrupt_is_valid(phydev))
 +			phydev->state = PHY_CHANGELINK;
 +		break;
 +	case PHY_CHANGELINK:
 +		err = phy_read_status(phydev);
 +		if (err)
  			break;
 -		case PHY_CHANGELINK:
 -			err = phy_read_status(phydev);
  
 -			if (err)
 -				break;
 +		if (phydev->link) {
 +			phydev->state = PHY_RUNNING;
 +			netif_carrier_on(phydev->attached_dev);
 +		} else {
 +			phydev->state = PHY_NOLINK;
 +			netif_carrier_off(phydev->attached_dev);
 +		}
  
 -			if (phydev->link) {
 -				phydev->state = PHY_RUNNING;
 -				netif_carrier_on(phydev->attached_dev);
 -			} else {
 -				phydev->state = PHY_NOLINK;
 -				netif_carrier_off(phydev->attached_dev);
 -			}
 +		phydev->adjust_link(phydev->attached_dev);
  
 +		if (phy_interrupt_is_valid(phydev))
 +			err = phy_config_interrupt(phydev,
 +						   PHY_INTERRUPT_ENABLED);
 +		break;
 +	case PHY_HALTED:
 +		if (phydev->link) {
 +			phydev->link = 0;
 +			netif_carrier_off(phydev->attached_dev);
  			phydev->adjust_link(phydev->attached_dev);
 -
 -			if (phy_interrupt_is_valid(phydev))
 -				err = phy_config_interrupt(phydev,
 -						PHY_INTERRUPT_ENABLED);
 -			break;
 -		case PHY_HALTED:
 -			if (phydev->link) {
 -				phydev->link = 0;
 -				netif_carrier_off(phydev->attached_dev);
 -				phydev->adjust_link(phydev->attached_dev);
 -			}
 +			do_suspend = 1;
 +		}
 +		break;
 +	case PHY_RESUMING:
 +		err = phy_clear_interrupt(phydev);
 +		if (err)
  			break;
 -		case PHY_RESUMING:
 -
 -			err = phy_clear_interrupt(phydev);
  
 -			if (err)
 -				break;
 -
 -			err = phy_config_interrupt(phydev,
 -					PHY_INTERRUPT_ENABLED);
 +		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 +		if (err)
 +			break;
  
 -			if (err)
 +		if (AUTONEG_ENABLE == phydev->autoneg) {
 +			err = phy_aneg_done(phydev);
 +			if (err < 0)
  				break;
  
 -			if (AUTONEG_ENABLE == phydev->autoneg) {
 -				err = phy_aneg_done(phydev);
 -				if (err < 0)
 -					break;
 -
 -				/* err > 0 if AN is done.
 -				 * Otherwise, it's 0, and we're
 -				 * still waiting for AN */
 -				if (err > 0) {
 -					err = phy_read_status(phydev);
 -					if (err)
 -						break;
 -
 -					if (phydev->link) {
 -						phydev->state = PHY_RUNNING;
 -						netif_carrier_on(phydev->attached_dev);
 -					} else
 -						phydev->state = PHY_NOLINK;
 -					phydev->adjust_link(phydev->attached_dev);
 -				} else {
 -					phydev->state = PHY_AN;
 -					phydev->link_timeout = PHY_AN_TIMEOUT;
 -				}
 -			} else {
 +			/* err > 0 if AN is done.
 +			 * Otherwise, it's 0, and we're still waiting for AN
 +			 */
 +			if (err > 0) {
  				err = phy_read_status(phydev);
  				if (err)
  					break;
@@@ -826,28 -899,11 +824,28 @@@
  				if (phydev->link) {
  					phydev->state = PHY_RUNNING;
  					netif_carrier_on(phydev->attached_dev);
 -				} else
 +				} else {
  					phydev->state = PHY_NOLINK;
 +				}
  				phydev->adjust_link(phydev->attached_dev);
 +			} else {
 +				phydev->state = PHY_AN;
 +				phydev->link_timeout = PHY_AN_TIMEOUT;
  			}
 -			break;
 +		} else {
 +			err = phy_read_status(phydev);
 +			if (err)
 +				break;
 +
 +			if (phydev->link) {
 +				phydev->state = PHY_RUNNING;
 +				netif_carrier_on(phydev->attached_dev);
 +			} else {
 +				phydev->state = PHY_NOLINK;
 +			}
 +			phydev->adjust_link(phydev->attached_dev);
 +		}
 +		break;
  	}
  
  	mutex_unlock(&phydev->lock);
@@@ -855,14 -911,11 +853,14 @@@
  	if (needs_aneg)
  		err = phy_start_aneg(phydev);
  
 +	if (do_suspend)
 +		phy_suspend(phydev);
 +
  	if (err < 0)
  		phy_error(phydev);
  
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
 -			PHY_STATE_TIME * HZ);
 +			   PHY_STATE_TIME * HZ);
  }
  
  void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@@ -904,10 -957,14 +902,10 @@@ static inline void mmd_phy_indirect(str
  static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
  				 int addr)
  {
  	mmd_phy_indirect(bus, prtad, devad, addr);
  
  	/* Read the content of the MMD's selected register */
 -	ret = bus->read(bus, addr, MII_MMD_DATA);
 -
 -	return ret;
 +	return bus->read(bus, addr, MII_MMD_DATA);
  }
  
  /**
@@@ -947,6 -1004,8 +945,6 @@@ static void phy_write_mmd_indirect(stru
   */
  int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
  {
 -	int ret = -EPROTONOSUPPORT;
 -
  	/* According to 802.3az,the EEE is supported only in full duplex-mode.
  	 * Also EEE feature is active when core is operating with MII, GMII
  	 * or RGMII.
@@@ -972,7 -1031,7 +970,7 @@@
  
  		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
  		if (!cap)
 -			goto eee_exit;
 +			return -EPROTONOSUPPORT;
  
  		/* Check which link settings negotiated and verify it in
  		 * the EEE advertising registers.
@@@ -991,7 -1050,7 +989,7 @@@
  		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
  		idx = phy_find_setting(phydev->speed, phydev->duplex);
  		if (!(lp & adv & settings[idx].setting))
 -			goto eee_exit;
 +			return -EPROTONOSUPPORT;
  
  		if (clk_stop_enable) {
  			/* Configure the PHY to stop receiving xMII
@@@ -1008,10 -1067,11 +1006,10 @@@
  					       MDIO_MMD_PCS, phydev->addr, val);
  		}
  
 -		ret = 0; /* EEE supported */
 +		return 0; /* EEE supported */
  	}
  
 -eee_exit:
 -	return ret;
 +	return -EPROTONOSUPPORT;
  }
  EXPORT_SYMBOL(phy_init_eee);
  
@@@ -1026,6 -1086,7 +1024,6 @@@ int phy_get_eee_err(struct phy_device *
  {
  	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
  				     MDIO_MMD_PCS, phydev->addr);
 -
  }
  EXPORT_SYMBOL(phy_get_eee_err);
  
@@@ -1075,8 -1136,9 +1073,8 @@@ EXPORT_SYMBOL(phy_ethtool_get_eee)
   */
  int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
  {
 -	int val;
 +	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
  
 -	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
  	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
  			       phydev->addr, val);
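
A note on the phy.c hunks above: besides flattening the switch by one indent
level, the rework moves the side effects out from under phydev->lock. The
state machine now only records needs_aneg and the new do_suspend flag, and
phy_start_aneg()/phy_suspend() run after mutex_unlock(); phy_start_aneg() in
particular takes phydev->lock itself, so it must not be called with the lock
held. A minimal userspace model of that decide-under-lock, act-after-unlock
shape (fake_phy, restart_aneg and suspend_phy are made-up names, not the
kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_phy {
	pthread_mutex_t lock;
	int state;		/* 0: forcing timed out, 1: halted */
};

static void restart_aneg(struct fake_phy *p) { (void)p; puts("restart autoneg"); }
static void suspend_phy(struct fake_phy *p)  { (void)p; puts("suspend"); }

static void state_machine(struct fake_phy *p)
{
	bool needs_aneg = false, do_suspend = false;

	pthread_mutex_lock(&p->lock);
	switch (p->state) {
	case 0:			/* cf. PHY_FORCING with link_timeout hit */
		needs_aneg = true;
		break;
	case 1:			/* cf. PHY_HALTED with the link still up */
		do_suspend = true;
		break;
	}
	pthread_mutex_unlock(&p->lock);

	/* side effects only after the lock is dropped */
	if (needs_aneg)
		restart_aneg(p);
	if (do_suspend)
		suspend_phy(p);
}

int main(void)
{
	struct fake_phy p = { PTHREAD_MUTEX_INITIALIZER, 1 };
	state_machine(&p);
	return 0;
}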
  
diff --combined drivers/net/usb/mcs7830.c
index aea68bc,f546378..36ff001
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@@ -36,7 -36,8 +36,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include <linux/crc32.h>
@@@ -116,7 -117,6 +116,6 @@@ enum 
  struct mcs7830_data {
  	u8 multi_filter[8];
  	u8 config;
- 	u8 link_counter;
  };
  
  static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@@ -560,26 -560,16 +559,16 @@@ static void mcs7830_status(struct usbne
  {
  	u8 *buf = urb->transfer_buffer;
  	bool link, link_changed;
- 	struct mcs7830_data *data = mcs7830_get_data(dev);
  
  	if (urb->actual_length < 16)
  		return;
  
- 	link = !(buf[1] & 0x20);
+ 	link = !(buf[1] == 0x20);
  	link_changed = netif_carrier_ok(dev->net) != link;
  	if (link_changed) {
- 		data->link_counter++;
- 		/*
- 		   track link state 20 times to guard against erroneous
- 		   link state changes reported sometimes by the chip
- 		 */
- 		if (data->link_counter > 20) {
- 			data->link_counter = 0;
- 			usbnet_link_change(dev, link, 0);
- 			netdev_dbg(dev->net, "Link Status is: %d\n", link);
- 		}
- 	} else
- 		data->link_counter = 0;
+ 		usbnet_link_change(dev, link, 0);
+ 		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ 	}
  }
  
  static const struct driver_info moschip_info = {
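
The mcs7830 change above drops the 20-sample link_counter debounce and
reports the state from each interrupt URB directly; note the link test also
changes from a bit test (buf[1] & 0x20) to an equality test (buf[1] == 0x20).
A self-contained sketch of the resulting parse, including the 16-byte length
guard, assuming the same status-buffer semantics the driver uses:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Parse the link bit out of a (hypothetical) 16-byte interrupt status
 * buffer, mirroring the patched mcs7830_status(). */
static bool parse_link(const uint8_t *buf, size_t len)
{
	if (len < 16)
		return false;		/* too short to trust; driver just returns */
	return !(buf[1] == 0x20);	/* 0x20 in buf[1] means "no link" */
}

int main(void)
{
	uint8_t up[16] = { 0x00, 0x00 }, down[16] = { 0x00, 0x20 };
	printf("up=%d down=%d\n", parse_link(up, sizeof(up)),
	       parse_link(down, sizeof(down)));
	return 0;
}
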
diff --combined drivers/net/virtio_net.c
index c51a988,5d77644..7b17240
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@@ -13,7 -13,8 +13,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  //#define DEBUG
  #include <linux/netdevice.h>
@@@ -873,15 -874,16 +873,15 @@@ static netdev_tx_t start_xmit(struct sk
  /*
   * Send command via the control virtqueue and check status.  Commands
   * supported by the hypervisor, as indicated by feature bits, should
 - * never fail unless improperly formated.
 + * never fail unless improperly formatted.
   */
  static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 -				 struct scatterlist *out,
 -				 struct scatterlist *in)
 +				 struct scatterlist *out)
  {
  	struct scatterlist *sgs[4], hdr, stat;
  	struct virtio_net_ctrl_hdr ctrl;
  	virtio_net_ctrl_ack status = ~0;
 -	unsigned out_num = 0, in_num = 0, tmp;
 +	unsigned out_num = 0, tmp;
  
  	/* Caller should know better */
  	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@@ -894,13 -896,16 +894,13 @@@
  
  	if (out)
  		sgs[out_num++] = out;
  
  	/* Add return status. */
  	sg_init_one(&stat, &status, sizeof(status));
 -	sgs[out_num + in_num++] = &stat;
 +	sgs[out_num] = &stat;
  
 -	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
 -	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
 -	       < 0);
 +	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
 +	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
  
  	if (unlikely(!virtqueue_kick(vi->cvq)))
  		return status == VIRTIO_NET_OK;
@@@ -930,7 -935,8 +930,7 @@@ static int virtnet_set_mac_address(stru
  	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
  		sg_init_one(&sg, addr->sa_data, dev->addr_len);
  		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 -					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
 -					  &sg, NULL)) {
 +					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
  			dev_warn(&vdev->dev,
  				 "Failed to set mac address by vq command.\n");
  			return -EINVAL;
@@@ -1003,7 -1009,7 +1003,7 @@@ static void virtnet_ack_link_announce(s
  {
  	rtnl_lock();
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
 -				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
 +				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
  		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
  	rtnl_unlock();
  }
@@@ -1021,7 -1027,7 +1021,7 @@@ static int virtnet_set_queues(struct vi
  	sg_init_one(&sg, &s, sizeof(s));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 -				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
 +				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
  		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
  			 queue_pairs);
  		return -EINVAL;
@@@ -1061,7 -1067,7 +1061,7 @@@ static void virtnet_set_rx_mode(struct 
  	void *buf;
  	int i;
  
 -	/* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
 +	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
  	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
  		return;
  
@@@ -1071,14 -1077,16 +1071,14 @@@
  	sg_init_one(sg, &promisc, sizeof(promisc));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 -				  VIRTIO_NET_CTRL_RX_PROMISC,
 -				  sg, NULL))
 +				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
  		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
  			 promisc ? "en" : "dis");
  
  	sg_init_one(sg, &allmulti, sizeof(allmulti));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 -				  VIRTIO_NET_CTRL_RX_ALLMULTI,
 -				  sg, NULL))
 +				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
  		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
  			 allmulti ? "en" : "dis");
  
@@@ -1114,7 -1122,8 +1114,7 @@@
  		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 -				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
 -				  sg, NULL))
 +				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
  		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
  
  	kfree(buf);
@@@ -1129,7 -1138,7 +1129,7 @@@ static int virtnet_vlan_rx_add_vid(stru
  	sg_init_one(&sg, &vid, sizeof(vid));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 -				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
 +				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
  		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
  	return 0;
  }
@@@ -1143,7 -1152,7 +1143,7 @@@ static int virtnet_vlan_rx_kill_vid(str
  	sg_init_one(&sg, &vid, sizeof(vid));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 -				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
 +				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
  		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
  	return 0;
  }
@@@ -1788,16 -1797,17 +1788,17 @@@ static int virtnet_restore(struct virti
  	if (err)
  		return err;
  
- 	if (netif_running(vi->dev))
+ 	if (netif_running(vi->dev)) {
+ 		for (i = 0; i < vi->curr_queue_pairs; i++)
+ 			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ 				schedule_delayed_work(&vi->refill, 0);
+ 
  		for (i = 0; i < vi->max_queue_pairs; i++)
  			virtnet_napi_enable(&vi->rq[i]);
+ 	}
  
  	netif_device_attach(vi->dev);
  
- 	for (i = 0; i < vi->curr_queue_pairs; i++)
- 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- 			schedule_delayed_work(&vi->refill, 0);
- 
  	mutex_lock(&vi->config_lock);
  	vi->config_enable = true;
  	mutex_unlock(&vi->config_lock);
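
With the control-virtqueue simplification above, virtnet_send_command() no
longer takes a caller-supplied "in" scatterlist: the only device-writable
buffer is the one-byte ack status, so the sg array is always header (out),
optional payload (out), status (in). A toy model of that slot layout, with a
placeholder struct standing in for struct scatterlist:

#include <stdio.h>

struct sg_entry { const char *what; };	/* stand-in for struct scatterlist */

int main(void)
{
	struct sg_entry hdr  = { "ctrl header (driver -> device)" };
	struct sg_entry data = { "command payload (driver -> device)" };
	struct sg_entry stat = { "one-byte ack (device -> driver)" };
	struct sg_entry *sgs[4];
	unsigned int out_num = 0;

	sgs[out_num++] = &hdr;
	sgs[out_num++] = &data;	/* present here; optional in the driver */
	sgs[out_num] = &stat;	/* always exactly one "in" buffer */

	/* cf. virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, ...) */
	for (unsigned int i = 0; i <= out_num; i++)
		printf("slot %u: %s\n", i, sgs[i]->what);
	return 0;
}
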
diff --combined drivers/net/vxlan.c
index ab2e92e,ed384fe..481f85d
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@@ -916,32 -916,17 +916,32 @@@ static bool vxlan_snoop(struct net_devi
  }
  
  /* See if multicast group is already in use by other ID */
 -static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
 +static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
  {
  	struct vxlan_dev *vxlan;
  
 +	/* The vxlan_sock is only used by this dev; leaving the group has
 +	 * no effect on other vxlan devices.
 +	 */
 +	if (atomic_read(&dev->vn_sock->refcnt) == 1)
 +		return false;
 +
  	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
 -		if (!netif_running(vxlan->dev))
 +		if (!netif_running(vxlan->dev) || vxlan == dev)
  			continue;
  
 -		if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
 -				     remote_ip))
 -			return true;
 +		if (vxlan->vn_sock != dev->vn_sock)
 +			continue;
 +
 +		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
 +				      &dev->default_dst.remote_ip))
 +			continue;
 +
 +		if (vxlan->default_dst.remote_ifindex !=
 +		    dev->default_dst.remote_ifindex)
 +			continue;
 +
 +		return true;
  	}
  
  	return false;
@@@ -1081,7 -1066,7 +1081,7 @@@ static void vxlan_rcv(struct vxlan_soc
  	struct iphdr *oip = NULL;
  	struct ipv6hdr *oip6 = NULL;
  	struct vxlan_dev *vxlan;
 -	struct pcpu_tstats *stats;
 +	struct pcpu_sw_netstats *stats;
  	union vxlan_addr saddr;
  	__u32 vni;
  	int err = 0;
@@@ -1381,6 -1366,20 +1381,6 @@@ static bool route_shortcircuit(struct n
  	return false;
  }
  
 -static void vxlan_sock_put(struct sk_buff *skb)
 -{
 -	sock_put(skb->sk);
 -}
 -
 -/* On transmit, associate with the tunnel socket */
 -static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
 -{
 -	skb_orphan(skb);
 -	sock_hold(sk);
 -	skb->sk = sk;
 -	skb->destructor = vxlan_sock_put;
 -}
 -
  /* Compute source port for outgoing packet
   *   first choice to use L4 flow hash since it will spread
   *     better and maybe available from hardware
@@@ -1391,7 -1390,7 +1391,7 @@@ __be16 vxlan_src_port(__u16 port_min, _
  	unsigned int range = (port_max - port_min) + 1;
  	u32 hash;
  
 -	hash = skb_get_rxhash(skb);
 +	hash = skb_get_hash(skb);
  	if (!hash)
  		hash = jhash(skb->data, 2 * ETH_ALEN,
  			     (__force u32) skb->protocol);
@@@ -1500,6 -1499,8 +1500,6 @@@ static int vxlan6_xmit_skb(struct vxlan
  	ip6h->daddr	  = *daddr;
  	ip6h->saddr	  = *saddr;
  
 -	vxlan_set_owner(vs->sock->sk, skb);
 -
  	err = handle_offloads(skb);
  	if (err)
  		return err;
@@@ -1556,6 -1557,8 +1556,6 @@@ int vxlan_xmit_skb(struct vxlan_sock *v
  	uh->len = htons(skb->len);
  	uh->check = 0;
  
 -	vxlan_set_owner(vs->sock->sk, skb);
 -
  	err = handle_offloads(skb);
  	if (err)
  		return err;
@@@ -1569,12 -1572,11 +1569,12 @@@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb)
  static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
  			       struct vxlan_dev *dst_vxlan)
  {
 -	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
 -	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
 +	struct pcpu_sw_netstats *tx_stats, *rx_stats;
  	union vxlan_addr loopback;
  	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
  
 +	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
 +	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
  	skb->pkt_type = PACKET_HOST;
  	skb->encapsulation = 0;
  	skb->dev = dst_vxlan->dev;
@@@ -1768,7 -1770,7 +1768,7 @@@ static netdev_tx_t vxlan_xmit(struct sk
  	struct vxlan_dev *vxlan = netdev_priv(dev);
  	struct ethhdr *eth;
  	bool did_rsc = false;
 -	struct vxlan_rdst *rdst;
 +	struct vxlan_rdst *rdst, *fdst = NULL;
  	struct vxlan_fdb *f;
  
  	skb_reset_mac_header(skb);
@@@ -1810,7 -1812,7 +1810,7 @@@
  				vxlan_fdb_miss(vxlan, eth->h_dest);
  
  			dev->stats.tx_dropped++;
 -			dev_kfree_skb(skb);
 +			kfree_skb(skb);
  			return NETDEV_TX_OK;
  		}
  	}
@@@ -1818,19 -1820,12 +1818,19 @@@
  	list_for_each_entry_rcu(rdst, &f->remotes, list) {
  		struct sk_buff *skb1;
  
 +		if (!fdst) {
 +			fdst = rdst;
 +			continue;
 +		}
  		skb1 = skb_clone(skb, GFP_ATOMIC);
  		if (skb1)
  			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
  	}
  
 -	dev_kfree_skb(skb);
 +	if (fdst)
 +		vxlan_xmit_one(skb, dev, fdst, did_rsc);
 +	else
 +		kfree_skb(skb);
  	return NETDEV_TX_OK;
  }
  
@@@ -1887,12 -1882,12 +1887,12 @@@ static int vxlan_init(struct net_devic
  	struct vxlan_sock *vs;
  	int i;
  
 -	dev->tstats = alloc_percpu(struct pcpu_tstats);
 +	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
 -		struct pcpu_tstats *vxlan_stats;
 +		struct pcpu_sw_netstats *vxlan_stats;
  		vxlan_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&vxlan_stats->syncp);
  	}
@@@ -1940,6 -1935,7 +1940,6 @@@ static void vxlan_uninit(struct net_dev
  /* Start ageing timer and join group when device is brought up */
  static int vxlan_open(struct net_device *dev)
  {
 -	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
  	struct vxlan_dev *vxlan = netdev_priv(dev);
  	struct vxlan_sock *vs = vxlan->vn_sock;
  
@@@ -1947,7 -1943,8 +1947,7 @@@
  	if (!vs)
  		return -ENOTCONN;
  
 -	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
 -	    vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
 +	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
  		vxlan_sock_hold(vs);
  		dev_hold(dev);
  		queue_work(vxlan_wq, &vxlan->igmp_join);
@@@ -1986,7 -1983,7 +1986,7 @@@ static int vxlan_stop(struct net_devic
  	struct vxlan_sock *vs = vxlan->vn_sock;
  
  	if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
 -	    ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
 +	    !vxlan_group_used(vn, vxlan)) {
  		vxlan_sock_hold(vs);
  		dev_hold(dev);
  		queue_work(vxlan_wq, &vxlan->igmp_leave);
@@@ -2004,29 -2001,6 +2004,29 @@@ static void vxlan_set_multicast_list(st
  {
  }
  
 +static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +	struct vxlan_dev *vxlan = netdev_priv(dev);
 +	struct vxlan_rdst *dst = &vxlan->default_dst;
 +	struct net_device *lowerdev;
 +	int max_mtu;
 +
 +	lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
 +	if (lowerdev == NULL)
 +		return eth_change_mtu(dev, new_mtu);
 +
 +	if (dst->remote_ip.sa.sa_family == AF_INET6)
 +		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
 +	else
 +		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
 +
 +	if (new_mtu < 68 || new_mtu > max_mtu)
 +		return -EINVAL;
 +
 +	dev->mtu = new_mtu;
 +	return 0;
 +}
 +
  static const struct net_device_ops vxlan_netdev_ops = {
  	.ndo_init		= vxlan_init,
  	.ndo_uninit		= vxlan_uninit,
@@@ -2035,7 -2009,7 +2035,7 @@@
  	.ndo_start_xmit		= vxlan_xmit,
  	.ndo_get_stats64	= ip_tunnel_get_stats64,
  	.ndo_set_rx_mode	= vxlan_set_multicast_list,
 -	.ndo_change_mtu		= eth_change_mtu,
 +	.ndo_change_mtu		= vxlan_change_mtu,
  	.ndo_validate_addr	= eth_validate_addr,
  	.ndo_set_mac_address	= eth_mac_addr,
  	.ndo_fdb_add		= vxlan_fdb_add,
@@@ -2466,7 -2440,8 +2466,8 @@@ static int vxlan_newlink(struct net *ne
  		/* update header length based on lower device */
  		dev->hard_header_len = lowerdev->hard_header_len +
  				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- 	}
+ 	} else if (use_ipv6)
+ 		vxlan->flags |= VXLAN_F_IPV6;
  
  	if (data[IFLA_VXLAN_TOS])
  		vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
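
Two of the vxlan changes above deserve a closer look. vxlan_change_mtu()
caps the MTU at the lower device's MTU minus VXLAN_HEADROOM (or
VXLAN6_HEADROOM for IPv6 remotes), and the fan-out in vxlan_xmit() now
defers the first remote so the original skb is consumed by it instead of
being cloned and then freed. A simplified standalone model of that
clone-avoidance pattern (struct dst, fan_out and the int "packet" are
illustrative stand-ins):

#include <stdio.h>

struct dst { int id; struct dst *next; };

static int clone_pkt(int pkt) { return pkt; }	/* stand-in for skb_clone() */
static void xmit_one(int pkt, struct dst *d)
{
	printf("pkt %d -> dst %d\n", pkt, d->id);
}

/* Deliver pkt to every destination with n-1 clones instead of n: the
 * first destination is remembered and gets the original buffer last. */
static void fan_out(int pkt, struct dst *list)
{
	struct dst *first = NULL;

	for (struct dst *d = list; d; d = d->next) {
		if (!first) {
			first = d;
			continue;
		}
		xmit_one(clone_pkt(pkt), d);
	}
	if (first)
		xmit_one(pkt, first);	/* original consumed, no clone */
	/* else: no remotes at all; the real code kfree_skb()s here */
}

int main(void)
{
	struct dst d2 = { 2, NULL }, d1 = { 1, &d2 };
	fan_out(42, &d1);
	return 0;
}

For n destinations this does n-1 clones instead of n, and the common
single-destination case does none at all.
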
diff --combined drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 857ede3,a366d6b..741b38d
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@@ -29,8 -29,7 +29,8 @@@ static void ar9002_hw_set_desc_link(voi
  	((struct ath_desc*) ds)->ds_link = ds_link;
  }
  
 -static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 +static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
 +			      u32 *sync_cause_p)
  {
  	u32 isr = 0;
  	u32 mask2 = 0;
@@@ -77,9 -76,16 +77,16 @@@
  				mask2 |= ATH9K_INT_CST;
  			if (isr2 & AR_ISR_S2_TSFOOR)
  				mask2 |= ATH9K_INT_TSFOOR;
+ 
+ 			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ 				REG_WRITE(ah, AR_ISR_S2, isr2);
+ 				isr &= ~AR_ISR_BCNMISC;
+ 			}
  		}
  
- 		isr = REG_READ(ah, AR_ISR_RAC);
+ 		if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ 			isr = REG_READ(ah, AR_ISR_RAC);
+ 
  		if (isr == 0xffffffff) {
  			*masked = 0;
  			return false;
@@@ -98,11 -104,23 +105,23 @@@
  
  			*masked |= ATH9K_INT_TX;
  
- 			s0_s = REG_READ(ah, AR_ISR_S0_S);
+ 			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ 				s0_s = REG_READ(ah, AR_ISR_S0_S);
+ 				s1_s = REG_READ(ah, AR_ISR_S1_S);
+ 			} else {
+ 				s0_s = REG_READ(ah, AR_ISR_S0);
+ 				REG_WRITE(ah, AR_ISR_S0, s0_s);
+ 				s1_s = REG_READ(ah, AR_ISR_S1);
+ 				REG_WRITE(ah, AR_ISR_S1, s1_s);
+ 
+ 				isr &= ~(AR_ISR_TXOK |
+ 					 AR_ISR_TXDESC |
+ 					 AR_ISR_TXERR |
+ 					 AR_ISR_TXEOL);
+ 			}
+ 
  			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
  			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
- 
- 			s1_s = REG_READ(ah, AR_ISR_S1_S);
  			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
  			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
  		}
@@@ -115,13 -133,15 +134,15 @@@
  		*masked |= mask2;
  	}
  
- 	if (AR_SREV_9100(ah))
- 		return true;
- 
- 	if (isr & AR_ISR_GENTMR) {
+ 	if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
  		u32 s5_s;
  
- 		s5_s = REG_READ(ah, AR_ISR_S5_S);
+ 		if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ 			s5_s = REG_READ(ah, AR_ISR_S5_S);
+ 		} else {
+ 			s5_s = REG_READ(ah, AR_ISR_S5);
+ 		}
+ 
  		ah->intr_gen_timer_trigger =
  				MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
  
@@@ -134,11 -154,23 +155,24 @@@
  		if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
  		    !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  			*masked |= ATH9K_INT_TIM_TIMER;
+ 
+ 		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ 			REG_WRITE(ah, AR_ISR_S5, s5_s);
+ 			isr &= ~AR_ISR_GENTMR;
+ 		}
  	}
  
+ 	if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ 		REG_WRITE(ah, AR_ISR, isr);
+ 		REG_READ(ah, AR_ISR);
+ 	}
+ 
+ 	if (AR_SREV_9100(ah))
+ 		return true;
+ 
  	if (sync_cause) {
 -		ath9k_debug_sync_cause(common, sync_cause);
 +		if (sync_cause_p)
 +			*sync_cause_p = sync_cause;
  		fatal_int =
  			(sync_cause &
  			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
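
The ar9002_mac rework above makes the ISR path usable on chips without
ATH9K_HW_CAP_RAC_SUPPORTED: with RAC ("read and clear"), reading AR_ISR_RAC
and the *_S shadow registers acks the bits as a side effect; without it the
driver reads the live register and writes the value back to clear what it
handled. A compact model of the two styles, with reg accessors standing in
for REG_READ()/REG_WRITE() and the write-back modeled as write-one-to-clear:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t live_isr;	/* models AR_ISR */
static uint32_t shadow_isr;	/* models AR_ISR_RAC, cleared on read */

static uint32_t read_live(void)   { return live_isr; }
static uint32_t read_shadow(void) { uint32_t v = shadow_isr; shadow_isr = 0; return v; }
static void ack_live(uint32_t bits) { live_isr &= ~bits; }	/* write-back ack */

static uint32_t get_isr(bool rac_supported)
{
	uint32_t isr;

	if (rac_supported) {
		isr = read_shadow();	/* read-and-clear does the ack */
	} else {
		isr = read_live();
		ack_live(isr);		/* explicit write-back, as the patch adds */
	}
	return isr;
}

int main(void)
{
	live_isr = shadow_isr = 0x11;
	printf("rac=0x%x live-after=0x%x\n", get_isr(true), live_isr);
	printf("no-rac=0x%x live-after=0x%x\n", get_isr(false), live_isr);
	return 0;
}
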
diff --combined drivers/net/wireless/ath/ath9k/main.c
index 173a889,21aa09e..21b764b
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@@ -82,22 -82,6 +82,22 @@@ static bool ath9k_setpower(struct ath_s
  	return ret;
  }
  
 +void ath_ps_full_sleep(unsigned long data)
 +{
 +	struct ath_softc *sc = (struct ath_softc *) data;
 +	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 +	bool reset;
 +
 +	spin_lock(&common->cc_lock);
 +	ath_hw_cycle_counters_update(common);
 +	spin_unlock(&common->cc_lock);
 +
 +	ath9k_hw_setrxabort(sc->sc_ah, 1);
 +	ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
 +
 +	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 +}
 +
  void ath9k_ps_wakeup(struct ath_softc *sc)
  {
  	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@@ -108,7 -92,6 +108,7 @@@
  	if (++sc->ps_usecount != 1)
  		goto unlock;
  
 +	del_timer_sync(&sc->sleep_timer);
  	power_mode = sc->sc_ah->power_mode;
  	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
  
@@@ -134,17 -117,17 +134,17 @@@ void ath9k_ps_restore(struct ath_softc 
  	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  	enum ath9k_power_mode mode;
  	unsigned long flags;
 -	bool reset;
  
  	spin_lock_irqsave(&sc->sc_pm_lock, flags);
  	if (--sc->ps_usecount != 0)
  		goto unlock;
  
  	if (sc->ps_idle) {
 -		ath9k_hw_setrxabort(sc->sc_ah, 1);
 -		ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
 -		mode = ATH9K_PM_FULL_SLEEP;
 -	} else if (sc->ps_enabled &&
 +		mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
 +		goto unlock;
 +	}
 +
 +	if (sc->ps_enabled &&
  		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
  				     PS_WAIT_FOR_CAB |
  				     PS_WAIT_FOR_PSPOLL_DATA |
@@@ -180,13 -163,13 +180,13 @@@ static void __ath_cancel_work(struct at
  #endif
  }
  
 -static void ath_cancel_work(struct ath_softc *sc)
 +void ath_cancel_work(struct ath_softc *sc)
  {
  	__ath_cancel_work(sc);
  	cancel_work_sync(&sc->hw_reset_work);
  }
  
 -static void ath_restart_work(struct ath_softc *sc)
 +void ath_restart_work(struct ath_softc *sc)
  {
  	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
  
@@@ -504,13 -487,8 +504,13 @@@ void ath9k_tasklet(unsigned long data
  			ath_tx_edma_tasklet(sc);
  		else
  			ath_tx_tasklet(sc);
 +
 +		wake_up(&sc->tx_wait);
  	}
  
 +	if (status & ATH9K_INT_GENTIMER)
 +		ath_gen_timer_isr(sc->sc_ah);
 +
  	ath9k_btcoex_handle_interrupt(sc, status);
  
  	/* re-enable hardware interrupt */
@@@ -541,7 -519,6 +541,7 @@@ irqreturn_t ath_isr(int irq, void *dev
  	struct ath_hw *ah = sc->sc_ah;
  	struct ath_common *common = ath9k_hw_common(ah);
  	enum ath9k_int status;
 +	u32 sync_cause;
  	bool sched = false;
  
  	/*
@@@ -568,8 -545,7 +568,8 @@@
  	 * bits we haven't explicitly enabled so we mask the
  	 * value to insure we only process bits we requested.
  	 */
 -	ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
 +	ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
 +	ath9k_debug_sync_cause(sc, sync_cause);
  	status &= ah->imask;	/* discard unasked-for bits */
  
  	/*
@@@ -603,8 -579,7 +603,8 @@@
  
  		goto chip_reset;
  	}
 -#ifdef CONFIG_PM_SLEEP
 +
 +#ifdef CONFIG_ATH9K_WOW
  	if (status & ATH9K_INT_BMISS) {
  		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
  			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
@@@ -613,8 -588,6 +613,8 @@@
  		}
  	}
  #endif
 +
  	if (status & ATH9K_INT_SWBA)
  		tasklet_schedule(&sc->bcon_tasklet);
  
@@@ -654,7 -627,7 +654,7 @@@ chip_reset
  #undef SCHED_INTR
  }
  
 -static int ath_reset(struct ath_softc *sc)
 +int ath_reset(struct ath_softc *sc)
  {
  	int r;
  
@@@ -762,8 -735,6 +762,8 @@@ static int ath9k_start(struct ieee80211
  	 */
  	ath9k_cmn_init_crypto(sc->sc_ah);
  
 +	ath9k_hw_reset_tsf(ah);
 +
  	spin_unlock_bh(&sc->sc_pcu_lock);
  
  	mutex_unlock(&sc->mutex);
@@@ -994,8 -965,9 +994,9 @@@ void ath9k_calculate_iter_data(struct i
  	struct ath_common *common = ath9k_hw_common(ah);
  
  	/*
- 	 * Use the hardware MAC address as reference, the hardware uses it
- 	 * together with the BSSID mask when matching addresses.
+ 	 * Pick the MAC address of the first interface as the new hardware
+ 	 * MAC address. The hardware will use it together with the BSSID mask
+ 	 * when matching addresses.
  	 */
  	memset(iter_data, 0, sizeof(*iter_data));
  	memset(&iter_data->mask, 0xff, ETH_ALEN);
@@@ -1664,8 -1636,13 +1665,8 @@@ static void ath9k_bss_info_changed(stru
  	}
  
  	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
 -	    (changed & BSS_CHANGED_BEACON_INT)) {
 -		if (ah->opmode == NL80211_IFTYPE_AP &&
 -		    bss_conf->enable_beacon)
 -			ath9k_set_tsfadjust(sc, vif);
 -		if (ath9k_allow_beacon_config(sc, vif))
 -			ath9k_beacon_config(sc, vif, changed);
 -	}
 +	    (changed & BSS_CHANGED_BEACON_INT))
 +		ath9k_beacon_config(sc, vif, changed);
  
  	if (changed & BSS_CHANGED_ERP_SLOT) {
  		if (bss_conf->use_short_slot)
@@@ -1841,31 -1818,13 +1842,31 @@@ static void ath9k_set_coverage_class(st
  	mutex_unlock(&sc->mutex);
  }
  
 +static bool ath9k_has_tx_pending(struct ath_softc *sc)
 +{
 +	int i, npend = 0;
 +
 +	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 +		if (!ATH_TXQ_SETUP(sc, i))
 +			continue;
 +
 +		if (!sc->tx.txq[i].axq_depth)
 +			continue;
 +
 +		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
 +		if (npend)
 +			break;
 +	}
 +
 +	return !!npend;
 +}
 +
  static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
  {
  	struct ath_softc *sc = hw->priv;
  	struct ath_hw *ah = sc->sc_ah;
  	struct ath_common *common = ath9k_hw_common(ah);
 -	int timeout = 200; /* ms */
 -	int i, j;
 +	int timeout = HZ / 5; /* 200 ms */
  	bool drain_txq;
  
  	mutex_lock(&sc->mutex);
@@@ -1883,9 -1842,25 +1884,9 @@@
  		return;
  	}
  
 -	for (j = 0; j < timeout; j++) {
 -		bool npend = false;
 -
 -		if (j)
 -			usleep_range(1000, 2000);
 -
 -		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 -			if (!ATH_TXQ_SETUP(sc, i))
 -				continue;
 -
 -			npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
 -
 -			if (npend)
 -				break;
 -		}
 -
 -		if (!npend)
 -		    break;
 -	}
 +	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
 +			       timeout) > 0)
 +		drop = false;
  
  	if (drop) {
  		ath9k_ps_wakeup(sc);
@@@ -2047,6 -2022,333 +2048,6 @@@ static int ath9k_get_antenna(struct iee
  	return 0;
  }
  
 -#ifdef CONFIG_PM_SLEEP
 -
 -static void ath9k_wow_map_triggers(struct ath_softc *sc,
 -				   struct cfg80211_wowlan *wowlan,
 -				   u32 *wow_triggers)
 -{
 -	if (wowlan->disconnect)
 -		*wow_triggers |= AH_WOW_LINK_CHANGE |
 -				 AH_WOW_BEACON_MISS;
 -	if (wowlan->magic_pkt)
 -		*wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
 -
 -	if (wowlan->n_patterns)
 -		*wow_triggers |= AH_WOW_USER_PATTERN_EN;
 -
 -	sc->wow_enabled = *wow_triggers;
 -
 -}
 -
 -static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
 -{
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath_common *common = ath9k_hw_common(ah);
 -	int pattern_count = 0;
 -	int i, byte_cnt;
 -	u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
 -	u8 dis_deauth_mask[MAX_PATTERN_SIZE];
 -
 -	memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
 -	memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
 -
 -	/*
 -	 * Create Dissassociate / Deauthenticate packet filter
 -	 *
 -	 *     2 bytes        2 byte    6 bytes   6 bytes  6 bytes
 -	 *  +--------------+----------+---------+--------+--------+----
 -	 *  + Frame Control+ Duration +   DA    +  SA    +  BSSID +
 -	 *  +--------------+----------+---------+--------+--------+----
 -	 *
 -	 * The above is the management frame format for disassociate/
 -	 * deauthenticate pattern, from this we need to match the first byte
 -	 * of 'Frame Control' and DA, SA, and BSSID fields
 -	 * (skipping 2nd byte of FC and Duration feild.
 -	 *
 -	 * Disassociate pattern
 -	 * --------------------
 -	 * Frame control = 00 00 1010
 -	 * DA, SA, BSSID = x:x:x:x:x:x
 -	 * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
 -	 *			    | x:x:x:x:x:x  -- 22 bytes
 -	 *
 -	 * Deauthenticate pattern
 -	 * ----------------------
 -	 * Frame control = 00 00 1100
 -	 * DA, SA, BSSID = x:x:x:x:x:x
 -	 * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
 -	 *			    | x:x:x:x:x:x  -- 22 bytes
 -	 */
 -
 -	/* Create Disassociate Pattern first */
 -
 -	byte_cnt = 0;
 -
 -	/* Fill out the mask with all FF's */
 -
 -	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
 -		dis_deauth_mask[i] = 0xff;
 -
 -	/* copy the first byte of frame control field */
 -	dis_deauth_pattern[byte_cnt] = 0xa0;
 -	byte_cnt++;
 -
 -	/* skip 2nd byte of frame control and Duration field */
 -	byte_cnt += 3;
 -
 -	/*
 -	 * need not match the destination mac address, it can be a broadcast
 -	 * mac address or an unicast to this station
 -	 */
 -	byte_cnt += 6;
 -
 -	/* copy the source mac address */
 -	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
 -
 -	byte_cnt += 6;
 -
 -	/* copy the bssid, its same as the source mac address */
 -
 -	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
 -
 -	/* Create Disassociate pattern mask */
 -
 -	dis_deauth_mask[0] = 0xfe;
 -	dis_deauth_mask[1] = 0x03;
 -	dis_deauth_mask[2] = 0xc0;
 -
 -	ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
 -
 -	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
 -				   pattern_count, byte_cnt);
 -
 -	pattern_count++;
 -	/*
 -	 * for de-authenticate pattern, only the first byte of the frame
 -	 * control field gets changed from 0xA0 to 0xC0
 -	 */
 -	dis_deauth_pattern[0] = 0xC0;
 -
 -	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
 -				   pattern_count, byte_cnt);
 -
 -}
 -
 -static void ath9k_wow_add_pattern(struct ath_softc *sc,
 -				  struct cfg80211_wowlan *wowlan)
 -{
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath9k_wow_pattern *wow_pattern = NULL;
 -	struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
 -	int mask_len;
 -	s8 i = 0;
 -
 -	if (!wowlan->n_patterns)
 -		return;
 -
 -	/*
 -	 * Add the new user configured patterns
 -	 */
 -	for (i = 0; i < wowlan->n_patterns; i++) {
 -
 -		wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
 -
 -		if (!wow_pattern)
 -			return;
 -
 -		/*
 -		 * TODO: convert the generic user space pattern to
 -		 * appropriate chip specific/802.11 pattern.
 -		 */
 -
 -		mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
 -		memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
 -		memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
 -		memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
 -		       patterns[i].pattern_len);
 -		memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
 -		wow_pattern->pattern_len = patterns[i].pattern_len;
 -
 -		/*
 -		 * just need to take care of deauth and disssoc pattern,
 -		 * make sure we don't overwrite them.
 -		 */
 -
 -		ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
 -					   wow_pattern->mask_bytes,
 -					   i + 2,
 -					   wow_pattern->pattern_len);
 -		kfree(wow_pattern);
 -
 -	}
 -
 -}
 -
 -static int ath9k_suspend(struct ieee80211_hw *hw,
 -			 struct cfg80211_wowlan *wowlan)
 -{
 -	struct ath_softc *sc = hw->priv;
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath_common *common = ath9k_hw_common(ah);
 -	u32 wow_triggers_enabled = 0;
 -	int ret = 0;
 -
 -	mutex_lock(&sc->mutex);
 -
 -	ath_cancel_work(sc);
 -	ath_stop_ani(sc);
 -	del_timer_sync(&sc->rx_poll_timer);
 -
 -	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
 -		ath_dbg(common, ANY, "Device not present\n");
 -		ret = -EINVAL;
 -		goto fail_wow;
 -	}
 -
 -	if (WARN_ON(!wowlan)) {
 -		ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
 -		ret = -EINVAL;
 -		goto fail_wow;
 -	}
 -
 -	if (!device_can_wakeup(sc->dev)) {
 -		ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
 -		ret = 1;
 -		goto fail_wow;
 -	}
 -
 -	/*
 -	 * none of the sta vifs are associated
 -	 * and we are not currently handling multivif
 -	 * cases, for instance we have to seperately
 -	 * configure 'keep alive frame' for each
 -	 * STA.
 -	 */
 -
 -	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
 -		ath_dbg(common, WOW, "None of the STA vifs are associated\n");
 -		ret = 1;
 -		goto fail_wow;
 -	}
 -
 -	if (sc->nvifs > 1) {
 -		ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
 -		ret = 1;
 -		goto fail_wow;
 -	}
 -
 -	ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
 -
 -	ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
 -		wow_triggers_enabled);
 -
 -	ath9k_ps_wakeup(sc);
 -
 -	ath9k_stop_btcoex(sc);
 -
 -	/*
 -	 * Enable wake up on recieving disassoc/deauth
 -	 * frame by default.
 -	 */
 -	ath9k_wow_add_disassoc_deauth_pattern(sc);
 -
 -	if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
 -		ath9k_wow_add_pattern(sc, wowlan);
 -
 -	spin_lock_bh(&sc->sc_pcu_lock);
 -	/*
 -	 * To avoid false wake, we enable beacon miss interrupt only
 -	 * when we go to sleep. We save the current interrupt mask
 -	 * so we can restore it after the system wakes up
 -	 */
 -	sc->wow_intr_before_sleep = ah->imask;
 -	ah->imask &= ~ATH9K_INT_GLOBAL;
 -	ath9k_hw_disable_interrupts(ah);
 -	ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
 -	ath9k_hw_set_interrupts(ah);
 -	ath9k_hw_enable_interrupts(ah);
 -
 -	spin_unlock_bh(&sc->sc_pcu_lock);
 -
 -	/*
 -	 * we can now sync irq and kill any running tasklets, since we already
 -	 * disabled interrupts and not holding a spin lock
 -	 */
 -	synchronize_irq(sc->irq);
 -	tasklet_kill(&sc->intr_tq);
 -
 -	ath9k_hw_wow_enable(ah, wow_triggers_enabled);
 -
 -	ath9k_ps_restore(sc);
 -	ath_dbg(common, ANY, "WoW enabled in ath9k\n");
 -	atomic_inc(&sc->wow_sleep_proc_intr);
 -
 -fail_wow:
 -	mutex_unlock(&sc->mutex);
 -	return ret;
 -}
 -
 -static int ath9k_resume(struct ieee80211_hw *hw)
 -{
 -	struct ath_softc *sc = hw->priv;
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath_common *common = ath9k_hw_common(ah);
 -	u32 wow_status;
 -
 -	mutex_lock(&sc->mutex);
 -
 -	ath9k_ps_wakeup(sc);
 -
 -	spin_lock_bh(&sc->sc_pcu_lock);
 -
 -	ath9k_hw_disable_interrupts(ah);
 -	ah->imask = sc->wow_intr_before_sleep;
 -	ath9k_hw_set_interrupts(ah);
 -	ath9k_hw_enable_interrupts(ah);
 -
 -	spin_unlock_bh(&sc->sc_pcu_lock);
 -
 -	wow_status = ath9k_hw_wow_wakeup(ah);
 -
 -	if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
 -		/*
 -		 * some devices may not pick beacon miss
 -		 * as the reason they woke up so we add
 -		 * that here for that shortcoming.
 -		 */
 -		wow_status |= AH_WOW_BEACON_MISS;
 -		atomic_dec(&sc->wow_got_bmiss_intr);
 -		ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
 -	}
 -
 -	atomic_dec(&sc->wow_sleep_proc_intr);
 -
 -	if (wow_status) {
 -		ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
 -			ath9k_hw_wow_event_to_string(wow_status), wow_status);
 -	}
 -
 -	ath_restart_work(sc);
 -	ath9k_start_btcoex(sc);
 -
 -	ath9k_ps_restore(sc);
 -	mutex_unlock(&sc->mutex);
 -
 -	return 0;
 -}
 -
 -static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 -{
 -	struct ath_softc *sc = hw->priv;
 -
 -	mutex_lock(&sc->mutex);
 -	device_init_wakeup(sc->dev, 1);
 -	device_set_wakeup_enable(sc->dev, enabled);
 -	mutex_unlock(&sc->mutex);
 -}
 -
 -#endif
  static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
  {
  	struct ath_softc *sc = hw->priv;
@@@ -2072,6 -2374,134 +2073,6 @@@ static void ath9k_channel_switch_beacon
  	sc->csa_vif = vif;
  }
  
 -static void ath9k_tx99_stop(struct ath_softc *sc)
 -{
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath_common *common = ath9k_hw_common(ah);
 -
 -	ath_drain_all_txq(sc);
 -	ath_startrecv(sc);
 -
 -	ath9k_hw_set_interrupts(ah);
 -	ath9k_hw_enable_interrupts(ah);
 -
 -	ieee80211_wake_queues(sc->hw);
 -
 -	kfree_skb(sc->tx99_skb);
 -	sc->tx99_skb = NULL;
 -	sc->tx99_state = false;
 -
 -	ath9k_hw_tx99_stop(sc->sc_ah);
 -	ath_dbg(common, XMIT, "TX99 stopped\n");
 -}
 -
 -static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
 -{
 -	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
 -			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
 -			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
 -			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
 -			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
 -			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
 -			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
 -			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
 -	u32 len = 1200;
 -	struct ieee80211_hw *hw = sc->hw;
 -	struct ieee80211_hdr *hdr;
 -	struct ieee80211_tx_info *tx_info;
 -	struct sk_buff *skb;
 -
 -	skb = alloc_skb(len, GFP_KERNEL);
 -	if (!skb)
 -		return NULL;
 -
 -	skb_put(skb, len);
 -
 -	memset(skb->data, 0, len);
 -
 -	hdr = (struct ieee80211_hdr *)skb->data;
 -	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
 -	hdr->duration_id = 0;
 -
 -	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
 -	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
 -	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
 -
 -	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 -
 -	tx_info = IEEE80211_SKB_CB(skb);
 -	memset(tx_info, 0, sizeof(*tx_info));
 -	tx_info->band = hw->conf.chandef.chan->band;
 -	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
 -	tx_info->control.vif = sc->tx99_vif;
 -
 -	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
 -
 -	return skb;
 -}
 -
 -void ath9k_tx99_deinit(struct ath_softc *sc)
 -{
 -	ath_reset(sc);
 -
 -	ath9k_ps_wakeup(sc);
 -	ath9k_tx99_stop(sc);
 -	ath9k_ps_restore(sc);
 -}
 -
 -int ath9k_tx99_init(struct ath_softc *sc)
 -{
 -	struct ieee80211_hw *hw = sc->hw;
 -	struct ath_hw *ah = sc->sc_ah;
 -	struct ath_common *common = ath9k_hw_common(ah);
 -	struct ath_tx_control txctl;
 -	int r;
 -
 -	if (sc->sc_flags & SC_OP_INVALID) {
 -		ath_err(common,
 -			"driver is in invalid state unable to use TX99");
 -		return -EINVAL;
 -	}
 -
 -	sc->tx99_skb = ath9k_build_tx99_skb(sc);
 -	if (!sc->tx99_skb)
 -		return -ENOMEM;
 -
 -	memset(&txctl, 0, sizeof(txctl));
 -	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
 -
 -	ath_reset(sc);
 -
 -	ath9k_ps_wakeup(sc);
 -
 -	ath9k_hw_disable_interrupts(ah);
 -	atomic_set(&ah->intr_ref_cnt, -1);
 -	ath_drain_all_txq(sc);
 -	ath_stoprecv(sc);
 -
 -	sc->tx99_state = true;
 -
 -	ieee80211_stop_queues(hw);
 -
 -	if (sc->tx99_power == MAX_RATE_POWER + 1)
 -		sc->tx99_power = MAX_RATE_POWER;
 -
 -	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
 -	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
 -	if (r) {
 -		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
 -		return r;
 -	}
 -
 -	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
 -		sc->tx99_power,
 -		sc->tx99_power / 2);
 -
 -	/* We leave the harware awake as it will be chugging on */
 -
 -	return 0;
 -}
 -
  struct ieee80211_ops ath9k_ops = {
  	.tx 		    = ath9k_tx,
  	.start 		    = ath9k_start,
@@@ -2102,7 -2532,7 +2103,7 @@@
  	.set_antenna	    = ath9k_set_antenna,
  	.get_antenna	    = ath9k_get_antenna,
  
 -#ifdef CONFIG_PM_SLEEP
 +#ifdef CONFIG_ATH9K_WOW
  	.suspend	    = ath9k_suspend,
  	.resume		    = ath9k_resume,
  	.set_wakeup	    = ath9k_set_wakeup,
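
ath9k_flush() above stops polling ath9k_has_pending_frames() in a
200-iteration usleep loop and instead sleeps on sc->tx_wait, which the TX
tasklet now wakes via wake_up(&sc->tx_wait) after completions. The same
shape in portable C, using a condition variable with a 200 ms deadline in
place of wait_event_timeout() (pending, tx_done and flush are illustrative
names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending = 3;		/* frames still queued in hardware */

static void tx_done(void)	/* completion path, cf. the tasklet */
{
	pthread_mutex_lock(&lock);
	pending--;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static bool flush(void)		/* cf. ath9k_flush() */
{
	struct timespec ts;
	bool drained;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += 200 * 1000000L;		/* 200 ms, cf. HZ / 5 */
	ts.tv_sec  += ts.tv_nsec / 1000000000L;
	ts.tv_nsec %= 1000000000L;

	pthread_mutex_lock(&lock);
	while (pending > 0)
		if (pthread_cond_timedwait(&cond, &lock, &ts))
			break;			/* timed out: caller force-drops */
	drained = (pending == 0);
	pthread_mutex_unlock(&lock);
	return drained;
}

int main(void)
{
	tx_done(); tx_done(); tx_done();
	printf("drained=%d\n", flush());
	return 0;
}
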
diff --combined drivers/net/wireless/rtlwifi/pci.c
index 8707d1a,5a53195..d7aa165
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@@ -688,6 -688,8 +688,6 @@@ static void _rtl_receive_one(struct iee
  		rtlpriv->stats.rxbytesunicast += skb->len;
  	}
  
 -	rtl_is_special_data(hw, skb, false);
 -
  	if (ieee80211_is_data(fc)) {
  		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
  
@@@ -738,6 -740,8 +738,8 @@@ static void _rtl_pci_rx_interrupt(struc
  	};
  	int index = rtlpci->rx_ring[rx_queue_idx].idx;
  
+ 	if (rtlpci->driver_is_goingto_unload)
+ 		return;
  	/*RX NORMAL PKT */
  	while (count--) {
  		/*rx descriptor */
@@@ -1634,6 -1638,7 +1636,7 @@@ static void rtl_pci_stop(struct ieee802
  	 */
  	set_hal_stop(rtlhal);
  
+ 	rtlpci->driver_is_goingto_unload = true;
  	rtlpriv->cfg->ops->disable_interrupt(hw);
  	cancel_work_sync(&rtlpriv->works.lps_change_work);
  
@@@ -1651,7 -1656,6 +1654,6 @@@
  	ppsc->rfchange_inprogress = true;
  	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
  
- 	rtlpci->driver_is_goingto_unload = true;
  	rtlpriv->cfg->ops->hw_disable(hw);
  	/* some things are not needed if firmware not available */
  	if (!rtlpriv->max_fw_size)
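
The rtlwifi fix above is purely about ordering: driver_is_goingto_unload is
now set before interrupts are disabled (rather than much later, under the
RF lock), and _rtl_pci_rx_interrupt() checks it first, so a late interrupt
bails out instead of walking an RX ring that is being torn down. Stripped to
its essentials (struct fake_pci_dev and the stubs are made up):

#include <stdbool.h>
#include <stdio.h>

struct fake_pci_dev {
	bool driver_is_goingto_unload;
};

static void disable_interrupts(struct fake_pci_dev *d) { (void)d; }

static void pci_stop(struct fake_pci_dev *d)
{
	d->driver_is_goingto_unload = true;	/* 1: flag becomes visible */
	disable_interrupts(d);			/* 2: only then tear down */
}

static void rx_interrupt(struct fake_pci_dev *d)
{
	if (d->driver_is_goingto_unload)
		return;		/* late IRQ: don't touch the dying ring */
	puts("processing RX descriptors");
}

int main(void)
{
	struct fake_pci_dev d = { false };
	rx_interrupt(&d);	/* processes */
	pci_stop(&d);
	rx_interrupt(&d);	/* bails out */
	return 0;
}
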
diff --combined drivers/net/xen-netback/common.h
index ba30a6d,c47794b..c955fc3
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@@ -101,6 -101,13 +101,13 @@@ struct xenvif_rx_meta 
  
  #define MAX_PENDING_REQS 256
  
+ /* It's possible for an skb to have a maximal number of frags
+  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+  * worst-case number of copy operations is MAX_SKB_FRAGS per
+  * ring slot.
+  */
+ #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+ 
  struct xenvif {
  	/* Unique identifier for this interface. */
  	domid_t          domid;
@@@ -136,18 -143,20 +143,18 @@@
  	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
  	struct xen_netif_rx_back_ring rx;
  	struct sk_buff_head rx_queue;
 -
 -	/* Allow xenvif_start_xmit() to peek ahead in the rx request
 -	 * ring.  This is a prediction of what rx_req_cons will be
 -	 * once all queued skbs are put on the ring.
 +	/* Set when the RX interrupt is triggered by the frontend.
 +	 * The worker thread may need to wake the queue.
  	 */
 -	RING_IDX rx_req_cons_peek;
 +	bool rx_event;
  
- 	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- 	 * head/fragment page uses 2 copy operations because it
- 	 * straddles two buffers in the frontend.
- 	 */
- 	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- 	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+ 	/* This array is allocated separately as it is large */
+ 	struct gnttab_copy *grant_copy_op;
  
+ 	/* We create one meta structure per ring request we consume, so
+ 	 * the maximum number is the same as the ring size.
+ 	 */
+ 	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
  
  	u8               fe_dev_addr[6];
  
@@@ -196,6 -205,8 +203,6 @@@ void xenvif_xenbus_fini(void)
  
  int xenvif_schedulable(struct xenvif *vif);
  
 -int xenvif_rx_ring_full(struct xenvif *vif);
 -
  int xenvif_must_stop_queue(struct xenvif *vif);
  
  /* (Un)Map communication rings. */
@@@ -207,20 -218,21 +214,20 @@@ int xenvif_map_frontend_rings(struct xe
  /* Check for SKBs from frontend and schedule backend processing */
  void xenvif_check_rx_xenvif(struct xenvif *vif);
  
  /* Prevent the device from generating any further traffic. */
  void xenvif_carrier_off(struct xenvif *vif);
  
 -/* Returns number of ring slots required to send an skb to the frontend */
 -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 -
  int xenvif_tx_action(struct xenvif *vif, int budget);
 -void xenvif_rx_action(struct xenvif *vif);
  
  int xenvif_kthread(void *data);
 +void xenvif_kick_thread(struct xenvif *vif);
 +
 +/* Determine whether the needed number of ring slots is available,
 + * and set req_event if not.
 + */
 +bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 +
 +void xenvif_stop_queue(struct xenvif *vif);
  
  extern bool separate_tx_rx_irq;
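
The MAX_GRANT_COPY_OPS comment above is the whole argument for the separate
allocation: each consumed ring slot can in the worst case require
MAX_SKB_FRAGS copy operations, so the array scales as
MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE, far too large to embed in struct
xenvif. Plugging in the usual values (17 frags with 4 KiB pages, a 256-entry
ring, and roughly 56 bytes per struct gnttab_copy, the last figure an
estimate):

#include <stdio.h>

#define MAX_SKB_FRAGS          17	/* usual value with 4 KiB pages */
#define XEN_NETIF_RX_RING_SIZE 256	/* usual ring size, assumed here */
#define GNTTAB_COPY_SIZE       56	/* rough sizeof(struct gnttab_copy) */

int main(void)
{
	unsigned long ops = (unsigned long)MAX_SKB_FRAGS *
			    XEN_NETIF_RX_RING_SIZE;

	printf("%lu copy ops, ~%lu KiB of descriptors\n",
	       ops, ops * GNTTAB_COPY_SIZE / 1024);	/* 4352, ~238 KiB */
	return 0;
}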
  
diff --combined drivers/net/xen-netback/interface.c
index 1dcb960,fff8cdd..b9de31e
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@@ -34,6 -34,7 +34,7 @@@
  #include <linux/ethtool.h>
  #include <linux/rtnetlink.h>
  #include <linux/if_vlan.h>
+ #include <linux/vmalloc.h>
  
  #include <xen/events.h>
  #include <asm/xen/hypercall.h>
@@@ -46,6 -47,11 +47,6 @@@ int xenvif_schedulable(struct xenvif *v
  	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
  }
  
 -static int xenvif_rx_schedulable(struct xenvif *vif)
 -{
 -	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 -}
 -
  static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
  {
  	struct xenvif *vif = dev_id;
@@@ -99,8 -105,8 +100,8 @@@ static irqreturn_t xenvif_rx_interrupt(
  {
  	struct xenvif *vif = dev_id;
  
 -	if (xenvif_rx_schedulable(vif))
 -		netif_wake_queue(vif->dev);
 +	vif->rx_event = true;
 +	xenvif_kick_thread(vif);
  
  	return IRQ_HANDLED;
  }
@@@ -116,35 -122,24 +117,35 @@@ static irqreturn_t xenvif_interrupt(in
  static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
  	struct xenvif *vif = netdev_priv(dev);
 +	int min_slots_needed;
  
  	BUG_ON(skb->dev != dev);
  
  	/* Drop the packet if vif is not ready */
 -	if (vif->task == NULL)
 +	if (vif->task == NULL || !xenvif_schedulable(vif))
  		goto drop;
  
 -	/* Drop the packet if the target domain has no receive buffers. */
 -	if (!xenvif_rx_schedulable(vif))
 -		goto drop;
 +	/* At best we'll need one slot for the header and one for each
 +	 * frag.
 +	 */
 +	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
  
 -	/* Reserve ring slots for the worst-case number of fragments. */
 -	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 +	/* If the skb is GSO then we'll also need an extra slot for the
 +	 * metadata.
 +	 */
 +	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 +	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
 +		min_slots_needed++;
  
 -	if (vif->can_queue && xenvif_must_stop_queue(vif))
 -		netif_stop_queue(dev);
 +	/* If the skb can't possibly fit in the remaining slots
 +	 * then turn off the queue to give the ring a chance to
 +	 * drain.
 +	 */
 +	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
 +		xenvif_stop_queue(vif);
  
 -	xenvif_queue_tx_skb(vif, skb);
 +	skb_queue_tail(&vif->rx_queue, skb);
 +	xenvif_kick_thread(vif);
  
  	return NETDEV_TX_OK;
  
@@@ -154,6 -149,12 +155,6 @@@
  	return NETDEV_TX_OK;
  }
  
 -void xenvif_notify_tx_completion(struct xenvif *vif)
 -{
 -	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
 -		netif_wake_queue(vif->dev);
 -}
 -
  static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
  {
  	struct xenvif *vif = netdev_priv(dev);
@@@ -307,6 -308,15 +308,15 @@@ struct xenvif *xenvif_alloc(struct devi
  	SET_NETDEV_DEV(dev, parent);
  
  	vif = netdev_priv(dev);
+ 
+ 	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+ 				     MAX_GRANT_COPY_OPS);
+ 	if (vif->grant_copy_op == NULL) {
+ 		pr_warn("Could not allocate grant copy space for %s\n", name);
+ 		free_netdev(dev);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
  	vif->domid  = domid;
  	vif->handle = handle;
  	vif->can_sg = 1;
@@@ -378,8 -388,6 +388,8 @@@ int xenvif_connect(struct xenvif *vif, 
  	if (err < 0)
  		goto err;
  
 +	init_waitqueue_head(&vif->wq);
 +
  	if (tx_evtchn == rx_evtchn) {
  		/* feature-split-event-channels == 0 */
  		err = bind_interdomain_evtchn_to_irqhandler(
@@@ -412,6 -420,7 +422,6 @@@
  		disable_irq(vif->rx_irq);
  	}
  
 -	init_waitqueue_head(&vif->wq);
  	task = kthread_create(xenvif_kthread,
  			      (void *)vif, "%s", vif->dev->name);
  	if (IS_ERR(task)) {
@@@ -488,6 -497,7 +498,7 @@@ void xenvif_free(struct xenvif *vif
  
  	unregister_netdev(vif->dev);
  
+ 	vfree(vif->grant_copy_op);
  	free_netdev(vif->dev);
  
  	module_put(THIS_MODULE);
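
xenvif_start_xmit() above replaces the exact xenvif_count_skb_slots() dry
run with a cheap lower bound: one slot for the linear area, one per frag,
plus one when TCPv4/TCPv6 GSO metadata must accompany the packet. As a tiny
standalone function (the names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Lower bound on ring slots for one skb: linear header + one per frag,
 * plus one slot when GSO extra-info must be sent along. */
static int min_slots_needed(int nr_frags, bool is_gso)
{
	int slots = 1 + nr_frags;

	if (is_gso)
		slots++;
	return slots;
}

int main(void)
{
	printf("%d\n", min_slots_needed(3, true));	/* prints 5 */
	return 0;
}
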
diff --combined drivers/net/xen-netback/netback.c
index 611aebe,7842555..4f81ac0
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@@ -138,26 -138,36 +138,26 @@@ static inline pending_ring_idx_t nr_pen
  		vif->pending_prod + vif->pending_cons;
  }
  
 -static int max_required_rx_slots(struct xenvif *vif)
 +bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
  {
 -	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 +	RING_IDX prod, cons;
  
 -	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
 -	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
 -		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 -
 -	return max;
 -}
 -
 -int xenvif_rx_ring_full(struct xenvif *vif)
 -{
 -	RING_IDX peek   = vif->rx_req_cons_peek;
 -	RING_IDX needed = max_required_rx_slots(vif);
 +	do {
 +		prod = vif->rx.sring->req_prod;
 +		cons = vif->rx.req_cons;
  
 -	return ((vif->rx.sring->req_prod - peek) < needed) ||
 -	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
 -}
 +		if (prod - cons >= needed)
 +			return true;
  
 -int xenvif_must_stop_queue(struct xenvif *vif)
 -{
 -	if (!xenvif_rx_ring_full(vif))
 -		return 0;
 +		vif->rx.sring->req_event = prod + 1;
  
 -	vif->rx.sring->req_event = vif->rx_req_cons_peek +
 -		max_required_rx_slots(vif);
 -	mb(); /* request notification /then/ check the queue */
 +		/* Make sure event is visible before we check prod
 +		 * again.
 +		 */
 +		mb();
 +	} while (vif->rx.sring->req_prod != prod);
  
 -	return xenvif_rx_ring_full(vif);
 +	return false;
  }
  
  /*
@@@ -200,6 -210,93 +200,6 @@@ static bool start_new_rx_buffer(int off
  	return false;
  }
  
 -struct xenvif_count_slot_state {
 -	unsigned long copy_off;
 -	bool head;
 -};
 -
 -unsigned int xenvif_count_frag_slots(struct xenvif *vif,
 -				     unsigned long offset, unsigned long size,
 -				     struct xenvif_count_slot_state *state)
 -{
 -	unsigned count = 0;
 -
 -	offset &= ~PAGE_MASK;
 -
 -	while (size > 0) {
 -		unsigned long bytes;
 -
 -		bytes = PAGE_SIZE - offset;
 -
 -		if (bytes > size)
 -			bytes = size;
 -
 -		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
 -			count++;
 -			state->copy_off = 0;
 -		}
 -
 -		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
 -			bytes = MAX_BUFFER_OFFSET - state->copy_off;
 -
 -		state->copy_off += bytes;
 -
 -		offset += bytes;
 -		size -= bytes;
 -
 -		if (offset == PAGE_SIZE)
 -			offset = 0;
 -
 -		state->head = false;
 -	}
 -
 -	return count;
 -}
 -
 -/*
 - * Figure out how many ring slots we're going to need to send @skb to
 - * the guest. This function is essentially a dry run of
 - * xenvif_gop_frag_copy.
 - */
 -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 -{
 -	struct xenvif_count_slot_state state;
 -	unsigned int count;
 -	unsigned char *data;
 -	unsigned i;
 -
 -	state.head = true;
 -	state.copy_off = 0;
 -
 -	/* Slot for the first (partial) page of data. */
 -	count = 1;
 -
 -	/* Need a slot for the GSO prefix for GSO extra data? */
 -	if (skb_shinfo(skb)->gso_size)
 -		count++;
 -
 -	data = skb->data;
 -	while (data < skb_tail_pointer(skb)) {
 -		unsigned long offset = offset_in_page(data);
 -		unsigned long size = PAGE_SIZE - offset;
 -
 -		if (data + size > skb_tail_pointer(skb))
 -			size = skb_tail_pointer(skb) - data;
 -
 -		count += xenvif_count_frag_slots(vif, offset, size, &state);
 -
 -		data += size;
 -	}
 -
 -	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 -		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 -		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 -
 -		count += xenvif_count_frag_slots(vif, offset, size, &state);
 -	}
 -	return count;
 -}
 -
  struct netrx_pending_operations {
  	unsigned copy_prod, copy_cons;
  	unsigned meta_prod, meta_cons;
@@@ -460,12 -557,12 +460,12 @@@ struct skb_cb_overlay 
  	int meta_slots_used;
  };
  
 -static void xenvif_kick_thread(struct xenvif *vif)
 +void xenvif_kick_thread(struct xenvif *vif)
  {
  	wake_up(&vif->wq);
  }
  
 -void xenvif_rx_action(struct xenvif *vif)
 +static void xenvif_rx_action(struct xenvif *vif)
  {
  	s8 status;
  	u16 flags;
@@@ -474,6 -571,8 +474,6 @@@
  	struct sk_buff *skb;
  	LIST_HEAD(notify);
  	int ret;
 -	int nr_frags;
 -	int count;
  	unsigned long offset;
  	struct skb_cb_overlay *sco;
  	int need_to_notify = 0;
@@@ -485,51 -584,38 +485,51 @@@
  
  	skb_queue_head_init(&rxq);
  
 -	count = 0;
 -
  	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 -		vif = netdev_priv(skb->dev);
 -		nr_frags = skb_shinfo(skb)->nr_frags;
 +		int max_slots_needed;
 +		int i;
 +
 +		/* We need a cheap worst-case estimate for the number of
 +		 * slots we'll use.
 +		 */
 +
 +		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
 +						skb_headlen(skb),
 +						PAGE_SIZE);
 +		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +			unsigned int size;
 +			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 +			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
 +		}
 +		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 +		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
 +			max_slots_needed++;
 +
 +		/* If the skb may not fit then bail out now */
 +		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 +			skb_queue_head(&vif->rx_queue, skb);
 +			need_to_notify = 1;
 +			break;
 +		}
  
  		sco = (struct skb_cb_overlay *)skb->cb;
  		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 -
 -		count += nr_frags + 1;
 +		BUG_ON(sco->meta_slots_used > max_slots_needed);
  
  		__skb_queue_tail(&rxq, skb);
 -
 -		/* Filled the batch queue? */
 -		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
 -		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
 -			break;
  	}
  
  	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
  
  	if (!npo.copy_prod)
 -		return;
 +		goto done;
  
- 	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
  	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
  
  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
  		sco = (struct skb_cb_overlay *)skb->cb;
  
 -		vif = netdev_priv(skb->dev);
 -
  		if ((1 << vif->meta[npo.meta_cons].gso_type) &
  		    vif->gso_prefix_mask) {
  			resp = RING_GET_RESPONSE(&vif->rx,
@@@ -595,13 -681,25 +595,13 @@@
  		if (ret)
  			need_to_notify = 1;
  
 -		xenvif_notify_tx_completion(vif);
 -
  		npo.meta_cons += sco->meta_slots_used;
  		dev_kfree_skb(skb);
  	}
  
 +done:
  	if (need_to_notify)
  		notify_remote_via_irq(vif->rx_irq);
 -
 -	/* More work to do? */
 -	if (!skb_queue_empty(&vif->rx_queue))
 -		xenvif_kick_thread(vif);
 -}
 -
 -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
 -{
 -	skb_queue_tail(&vif->rx_queue, skb);
 -
 -	xenvif_kick_thread(vif);
  }
  
  void xenvif_check_rx_xenvif(struct xenvif *vif)
@@@ -1043,7 -1141,10 +1043,7 @@@ static int xenvif_set_skb_gso(struct xe
  	}
  
  	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 -
 -	/* Header must be checked, and gso_segs computed. */
 -	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 -	skb_shinfo(skb)->gso_segs = 0;
 +	/* gso_segs will be calculated later */
  
  	return 0;
  }
@@@ -1108,8 -1209,10 +1108,10 @@@ static int checksum_setup_ip(struct xen
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
- 					  offsetof(struct tcphdr, check)))
+ 					  offsetof(struct tcphdr, check))) {
+ 			err = -EPROTO;
  			goto out;
+ 		}
  
  		if (recalculate_partial_csum)
  			tcp_hdr(skb)->check =
@@@ -1126,8 -1229,10 +1128,10 @@@
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
- 					  offsetof(struct udphdr, check)))
+ 					  offsetof(struct udphdr, check))) {
+ 			err = -EPROTO;
  			goto out;
+ 		}
  
  		if (recalculate_partial_csum)
  			udp_hdr(skb)->check =
@@@ -1249,8 -1354,10 +1253,10 @@@ static int checksum_setup_ipv6(struct x
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
- 					  offsetof(struct tcphdr, check)))
+ 					  offsetof(struct tcphdr, check))) {
+ 			err = -EPROTO;
  			goto out;
+ 		}
  
  		if (recalculate_partial_csum)
  			tcp_hdr(skb)->check =
@@@ -1267,8 -1374,10 +1273,10 @@@
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
- 					  offsetof(struct udphdr, check)))
+ 					  offsetof(struct udphdr, check))) {
+ 			err = -EPROTO;
  			goto out;
+ 		}
  
  		if (recalculate_partial_csum)
  			udp_hdr(skb)->check =
@@@ -1578,20 -1687,6 +1586,20 @@@ static int xenvif_tx_submit(struct xenv
  
  		skb_probe_transport_header(skb, 0);
  
 +		/* If the packet is GSO then we will have just set up the
 +		 * transport header offset in checksum_setup so it's now
 +		 * straightforward to calculate gso_segs.
 +		 */
 +		if (skb_is_gso(skb)) {
 +			int mss = skb_shinfo(skb)->gso_size;
 +			int hdrlen = skb_transport_header(skb) -
 +				skb_mac_header(skb) +
 +				tcp_hdrlen(skb);
 +
 +			skb_shinfo(skb)->gso_segs =
 +				DIV_ROUND_UP(skb->len - hdrlen, mss);
 +		}
 +
  		vif->dev->stats.rx_bytes += skb->len;
  		vif->dev->stats.rx_packets++;
  
@@@ -1716,7 -1811,7 +1724,7 @@@ static struct xen_netif_rx_response *ma
  
  static inline int rx_work_todo(struct xenvif *vif)
  {
 -	return !skb_queue_empty(&vif->rx_queue);
 +	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
  }
  
  static inline int tx_work_todo(struct xenvif *vif)
@@@ -1766,6 -1861,8 +1774,6 @@@ int xenvif_map_frontend_rings(struct xe
  	rxs = (struct xen_netif_rx_sring *)addr;
  	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
  
 -	vif->rx_req_cons_peek = 0;
 -
  	return 0;
  
  err:
@@@ -1773,24 -1870,9 +1781,24 @@@
  	return err;
  }
  
 +void xenvif_stop_queue(struct xenvif *vif)
 +{
 +	if (!vif->can_queue)
 +		return;
 +
 +	netif_stop_queue(vif->dev);
 +}
 +
 +static void xenvif_start_queue(struct xenvif *vif)
 +{
 +	if (xenvif_schedulable(vif))
 +		netif_wake_queue(vif->dev);
 +}
 +
  int xenvif_kthread(void *data)
  {
  	struct xenvif *vif = data;
 +	struct sk_buff *skb;
  
  	while (!kthread_should_stop()) {
  		wait_event_interruptible(vif->wq,
@@@ -1799,22 -1881,12 +1807,22 @@@
  		if (kthread_should_stop())
  			break;
  
 -		if (rx_work_todo(vif))
 +		if (!skb_queue_empty(&vif->rx_queue))
  			xenvif_rx_action(vif);
  
 +		vif->rx_event = false;
 +
 +		if (skb_queue_empty(&vif->rx_queue) &&
 +		    netif_queue_stopped(vif->dev))
 +			xenvif_start_queue(vif);
 +
  		cond_resched();
  	}
  
 +	/* Bin any remaining skbs */
 +	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
 +		dev_kfree_skb(skb);
 +
  	return 0;
  }
  
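A quick, hedged illustration of the two calculations in the netback hunks
above. The following is a self-contained userspace model, not kernel code:
DIV_ROUND_UP is re-derived, the packet layout is invented, and none of the
names below are the real netback API. It shows how the worst-case ring-slot
estimate from xenvif_rx_action and the gso_segs computation from
xenvif_tx_submit work out for one concrete packet.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worst-case RX ring slots: one per page spanned by the linear area,
 * one per page spanned by each frag, plus one extra slot for GSO. */
static unsigned long max_slots_needed(unsigned long head_off,
				      unsigned long head_len,
				      const unsigned long *frag_sizes,
				      int nr_frags, int is_gso)
{
	unsigned long slots = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);
	int i;

	for (i = 0; i < nr_frags; i++)
		slots += DIV_ROUND_UP(frag_sizes[i], PAGE_SIZE);
	if (is_gso)
		slots++;
	return slots;
}

int main(void)
{
	unsigned long frags[] = { 7000, 200 };	/* 2 slots + 1 slot */

	/* A 100-byte linear area starting at in-page offset 4000 straddles
	 * a page boundary, so it alone costs 2 slots.
	 * Total: 2 + 2 + 1 + 1 (GSO) = 6. */
	printf("slots = %lu\n", max_slots_needed(4000, 100, frags, 2, 1));

	/* gso_segs as computed in xenvif_tx_submit: payload bytes divided
	 * by the MSS, rounded up. A 60000-byte skb with 54 bytes of
	 * headers and an MSS of 1448 yields 42 segments. */
	printf("gso_segs = %lu\n", DIV_ROUND_UP(60000UL - 54, 1448));
	return 0;
}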
diff --combined include/linux/netdevice.h
index 0c30af3,5faaadb..d9c961a
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@@ -1283,9 -1283,6 +1283,9 @@@ struct net_device 
  #if IS_ENABLED(CONFIG_NET_DSA)
  	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
  #endif
 +#if IS_ENABLED(CONFIG_TIPC)
 +	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
 +#endif
  	void 			*atalk_ptr;	/* AppleTalk link 	*/
  	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
  	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
@@@ -1409,7 -1406,7 +1409,7 @@@
  	union {
  		void				*ml_priv;
  		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
 -		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
 +		struct pcpu_sw_netstats __percpu	*tstats;
  		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
  		struct pcpu_vstats __percpu	*vstats; /* veth stats */
  	};
@@@ -1444,7 -1441,7 +1444,7 @@@
  	/* max exchange id for FCoE LRO by ddp */
  	unsigned int		fcoe_ddp_xid;
  #endif
 -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  	struct netprio_map __rcu *priomap;
  #endif
  	/* phy device may attach itself for hardware timestamping */
@@@ -1676,7 -1673,7 +1676,7 @@@ struct offload_callbacks 
  	int			(*gso_send_check)(struct sk_buff *skb);
  	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
  					       struct sk_buff *skb);
 -	int			(*gro_complete)(struct sk_buff *skb);
 +	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
  };
  
  struct packet_offload {
@@@ -1685,15 -1682,6 +1685,15 @@@
  	struct list_head	 list;
  };
  
 +/* often modified stats are per-cpu, others are shared (netdev->stats) */
 +struct pcpu_sw_netstats {
 +	u64     rx_packets;
 +	u64     rx_bytes;
 +	u64     tx_packets;
 +	u64     tx_bytes;
 +	struct u64_stats_sync   syncp;
 +};
 +
  #include <linux/notifier.h>
  
  /* netdevice notifier chain. Please remember to update the rtnetlink
@@@ -1750,6 -1738,8 +1750,6 @@@ netdev_notifier_info_to_dev(const struc
  	return info->dev;
  }
  
 -int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
 -				  struct netdev_notifier_info *info);
  int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  
  
@@@ -1816,6 -1806,7 +1816,6 @@@ void dev_remove_pack(struct packet_typ
  void __dev_remove_pack(struct packet_type *pt);
  void dev_add_offload(struct packet_offload *po);
  void dev_remove_offload(struct packet_offload *po);
 -void __dev_remove_offload(struct packet_offload *po);
  
  struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
  					unsigned short mask);
@@@ -1921,6 -1912,15 +1921,15 @@@ static inline int dev_parse_header(cons
  	return dev->header_ops->parse(skb, haddr);
  }
  
+ static inline int dev_rebuild_header(struct sk_buff *skb)
+ {
+ 	const struct net_device *dev = skb->dev;
+ 
+ 	if (!dev->header_ops || !dev->header_ops->rebuild)
+ 		return 0;
+ 	return dev->header_ops->rebuild(skb);
+ }
+ 
  typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
  static inline int unregister_gifconf(unsigned int family)
@@@ -2377,52 -2377,17 +2386,52 @@@ static inline int netif_copy_real_num_q
  #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
  int netif_get_num_default_rss_queues(void);
  
 -/* Use this variant when it is known for sure that it
 - * is executing from hardware interrupt context or with hardware interrupts
 - * disabled.
 - */
 -void dev_kfree_skb_irq(struct sk_buff *skb);
 +enum skb_free_reason {
 +	SKB_REASON_CONSUMED,
 +	SKB_REASON_DROPPED,
 +};
 +
 +void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 +void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
  
 -/* Use this variant in places where it could be invoked
 - * from either hardware interrupt or other context, with hardware interrupts
 - * either disabled or enabled.
 +/*
 + * It is not allowed to call kfree_skb() or consume_skb() from hardware
 + * interrupt context or with hardware interrupts being disabled.
 + * (in_irq() || irqs_disabled())
 + *
 + * We provide four helpers that can be used in the following contexts:
 + *
 + * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 + *  replacing kfree_skb(skb)
 + *
 + * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 + *  Typically used in place of consume_skb(skb) in TX completion path
 + *
 + * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 + *  replacing kfree_skb(skb)
 + *
 + * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 + *  and has consumed a packet. Used in place of consume_skb(skb)
   */
 -void dev_kfree_skb_any(struct sk_buff *skb);
 +static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 +{
 +	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 +}
 +
 +static inline void dev_consume_skb_irq(struct sk_buff *skb)
 +{
 +	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 +}
 +
 +static inline void dev_kfree_skb_any(struct sk_buff *skb)
 +{
 +	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 +}
 +
 +static inline void dev_consume_skb_any(struct sk_buff *skb)
 +{
 +	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 +}
  
  int netif_rx(struct sk_buff *skb);
  int netif_rx_ni(struct sk_buff *skb);
@@@ -2816,10 -2781,17 +2825,10 @@@ int register_netdev(struct net_device *
  void unregister_netdev(struct net_device *dev);
  
  /* General hardware address lists handling functions */
 -int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
 -			   struct netdev_hw_addr_list *from_list,
 -			   int addr_len, unsigned char addr_type);
 -void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 -			    struct netdev_hw_addr_list *from_list,
 -			    int addr_len, unsigned char addr_type);
  int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
  		   struct netdev_hw_addr_list *from_list, int addr_len);
  void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
  		      struct netdev_hw_addr_list *from_list, int addr_len);
 -void __hw_addr_flush(struct netdev_hw_addr_list *list);
  void __hw_addr_init(struct netdev_hw_addr_list *list);
  
  /* Functions used for device addresses handling */
@@@ -2827,6 -2799,10 +2836,6 @@@ int dev_addr_add(struct net_device *dev
  		 unsigned char addr_type);
  int dev_addr_del(struct net_device *dev, const unsigned char *addr,
  		 unsigned char addr_type);
 -int dev_addr_add_multiple(struct net_device *to_dev,
 -			  struct net_device *from_dev, unsigned char addr_type);
 -int dev_addr_del_multiple(struct net_device *to_dev,
 -			  struct net_device *from_dev, unsigned char addr_type);
  void dev_addr_flush(struct net_device *dev);
  int dev_addr_init(struct net_device *dev);
  
@@@ -2873,6 -2849,7 +2882,6 @@@ extern int		weight_p
  extern int		bpf_jit_enable;
  
  bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 -bool netdev_has_any_upper_dev(struct net_device *dev);
  struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
  						     struct list_head **iter);
  
@@@ -2901,7 -2878,6 +2910,7 @@@ void *netdev_lower_get_next_private_rcu
  	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
  
  void *netdev_adjacent_get_private(struct list_head *adj_list);
 +void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
  int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@@ -2912,6 -2888,8 +2921,6 @@@ int netdev_master_upper_dev_link_privat
  					 void *private);
  void netdev_upper_dev_unlink(struct net_device *dev,
  			     struct net_device *upper_dev);
 -void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
 -				       struct net_device *lower_dev);
  void *netdev_lower_dev_get_private(struct net_device *dev,
  				   struct net_device *lower_dev);
  int skb_checksum_help(struct sk_buff *skb);
@@@ -3039,6 -3017,19 +3048,19 @@@ static inline void netif_set_gso_max_si
  	dev->gso_max_size = size;
  }
  
+ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ 					int pulled_hlen, u16 mac_offset,
+ 					int mac_len)
+ {
+ 	skb->protocol = protocol;
+ 	skb->encapsulation = 1;
+ 	skb_push(skb, pulled_hlen);
+ 	skb_reset_transport_header(skb);
+ 	skb->mac_header = mac_offset;
+ 	skb->network_header = skb->mac_header + mac_len;
+ 	skb->mac_len = mac_len;
+ }
+ 
  static inline bool netif_is_macvlan(struct net_device *dev)
  {
  	return dev->priv_flags & IFF_MACVLAN;
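One consequence of the consume/kfree split introduced in the hunk above:
net_tx_action (in the net/core/dev.c hunks further down) now emits
trace_consume_skb() for SKB_REASON_CONSUMED and reserves trace_kfree_skb()
for genuine drops, so drop-monitoring tools stop counting healthy TX
completions. A hedged sketch of a driver TX-completion interrupt using the
new helpers follows; the mydrv_* names and ring accessor are invented, and
only the dev_*_skb_irq() calls come from this patch.

/* Illustrative only: mydrv_* is a made-up driver. */
static irqreturn_t mydrv_tx_complete_irq(int irq, void *data)
{
	struct mydrv_priv *priv = data;
	struct sk_buff *skb;
	bool had_error;

	while ((skb = mydrv_next_completed(priv, &had_error)) != NULL) {
		if (unlikely(had_error))
			dev_kfree_skb_irq(skb);		/* drop: trace_kfree_skb() */
		else
			dev_consume_skb_irq(skb);	/* normal: trace_consume_skb() */
	}
	return IRQ_HANDLED;
}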
diff --combined include/linux/skbuff.h
index c5cd016,6f69b3f..88d4f2e
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@@ -34,82 -34,11 +34,82 @@@
  #include <linux/netdev_features.h>
  #include <net/flow_keys.h>
  
 +/* A. Checksumming of received packets by device.
 + *
 + * CHECKSUM_NONE:
 + *
 + *   Device failed to checksum this packet e.g. due to lack of capabilities.
 + *   The packet contains the full (though not verified) checksum in the packet
 + *   data but not in skb->csum. Thus, skb->csum is undefined in this case.
 + *
 + * CHECKSUM_UNNECESSARY:
 + *
 + *   The hardware you're dealing with doesn't calculate the full checksum
 + *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
 + *   for specific protocols, e.g. TCP/UDP/SCTP; for such packets it will
 + *   set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
 + *   undefined in this case though. It is a bad option, but, unfortunately,
 + *   nowadays most vendors do this, apparently with the secret goal of selling
 + *   you new devices once you add a new protocol to your host, e.g. IPv6. 8)
 + *
 + * CHECKSUM_COMPLETE:
 + *
 + *   This is the most generic way. The device supplied the checksum of the
 + *   _whole_ packet as seen by netif_rx() and stored it in skb->csum. This
 + *   means the hardware doesn't need to parse L3/L4 headers to implement it.
 + *
 + *   Note: Even if device supports only some protocols, but is able to produce
 + *   skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 + *
 + * CHECKSUM_PARTIAL:
 + *
 + *   This is identical to the case for output below. This may occur on a packet
 + *   received directly from another Linux OS, e.g., a virtualized Linux kernel
 + *   on the same host. The packet can be treated in the same way as
 + *   CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
 + *   checksum must be filled in by the OS or the hardware.
 + *
 + * B. Checksumming on output.
 + *
 + * CHECKSUM_NONE:
 + *
 + *   The skb was already checksummed by the protocol, or a checksum is not
 + *   required.
 + *
 + * CHECKSUM_PARTIAL:
 + *
 + *   The device is required to checksum the packet as seen by hard_start_xmit()
 + *   from skb->csum_start up to the end, and to record/write the checksum at
 + *   offset skb->csum_start + skb->csum_offset.
 + *
 + *   The device must show its capabilities in dev->features, set up at device
 + *   setup time, e.g. netdev_features.h:
 + *
 + *	NETIF_F_HW_CSUM	- It's a clever device, it's able to checksum everything.
 + *	NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
 + *			  IPv4. Sigh. Vendors like it this way for some unknown reason.
 + *			  Though, see comment above about CHECKSUM_UNNECESSARY. 8)
 + *	NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
 + *	NETIF_F_...     - Well, you get the picture.
 + *
 + * CHECKSUM_UNNECESSARY:
 + *
 + *   Normally, the device will do per protocol specific checksumming. Protocol
 + *   implementations that do not want the NIC to perform the checksum
 + *   calculation should use this flag in their outgoing skbs.
 + *
 + *	NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
 + *			   offload. Correspondingly, the FCoE protocol driver
 + *			   stack should use CHECKSUM_UNNECESSARY.
 + *
 + * Any questions? No questions, good.		--ANK
 + */
 +
  /* Don't change this without changing skb_csum_unnecessary! */
 -#define CHECKSUM_NONE 0
 -#define CHECKSUM_UNNECESSARY 1
 -#define CHECKSUM_COMPLETE 2
 -#define CHECKSUM_PARTIAL 3
 +#define CHECKSUM_NONE		0
 +#define CHECKSUM_UNNECESSARY	1
 +#define CHECKSUM_COMPLETE	2
 +#define CHECKSUM_PARTIAL	3
  
  #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
  				 ~(SMP_CACHE_BYTES - 1))
@@@ -125,6 -54,58 +125,6 @@@
  			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
  			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  
 -/* A. Checksumming of received packets by device.
 - *
 - *	NONE: device failed to checksum this packet.
 - *		skb->csum is undefined.
 - *
 - *	UNNECESSARY: device parsed packet and wouldbe verified checksum.
 - *		skb->csum is undefined.
 - *	      It is bad option, but, unfortunately, many of vendors do this.
 - *	      Apparently with secret goal to sell you new device, when you
 - *	      will add new protocol to your host. F.e. IPv6. 8)
 - *
 - *	COMPLETE: the most generic way. Device supplied checksum of _all_
 - *	    the packet as seen by netif_rx in skb->csum.
 - *	    NOTE: Even if device supports only some protocols, but
 - *	    is able to produce some skb->csum, it MUST use COMPLETE,
 - *	    not UNNECESSARY.
 - *
 - *	PARTIAL: identical to the case for output below.  This may occur
 - *	    on a packet received directly from another Linux OS, e.g.,
 - *	    a virtualised Linux kernel on the same host.  The packet can
 - *	    be treated in the same way as UNNECESSARY except that on
 - *	    output (i.e., forwarding) the checksum must be filled in
 - *	    by the OS or the hardware.
 - *
 - * B. Checksumming on output.
 - *
 - *	NONE: skb is checksummed by protocol or csum is not required.
 - *
 - *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 - *	from skb->csum_start to the end and to record the checksum
 - *	at skb->csum_start + skb->csum_offset.
 - *
 - *	Device must show its capabilities in dev->features, set
 - *	at device setup time.
 - *	NETIF_F_HW_CSUM	- it is clever device, it is able to checksum
 - *			  everything.
 - *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 - *			  TCP/UDP over IPv4. Sigh. Vendors like this
 - *			  way by an unknown reason. Though, see comment above
 - *			  about CHECKSUM_UNNECESSARY. 8)
 - *	NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
 - *
 - *	UNNECESSARY: device will do per protocol specific csum. Protocol drivers
 - *	that do not want net to perform the checksum calculation should use
 - *	this flag in their outgoing skbs.
 - *	NETIF_F_FCOE_CRC  this indicates the device can do FCoE FC CRC
 - *			  offload. Correspondingly, the FCoE protocol driver
 - *			  stack should use CHECKSUM_UNNECESSARY.
 - *
 - *	Any questions? No questions, good. 		--ANK
 - */
 -
  struct net_device;
  struct scatterlist;
  struct pipe_inode_info;
@@@ -722,73 -703,15 +722,73 @@@ unsigned int skb_find_text(struct sk_bu
  			   unsigned int to, struct ts_config *config,
  			   struct ts_state *state);
  
 -void __skb_get_rxhash(struct sk_buff *skb);
 -static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 +/*
 + * Packet hash types specify the type of hash in skb_set_hash.
 + *
 + * Hash types refer to the protocol layer addresses which are used to
 + * construct a packet's hash. The hashes are used to differentiate or identify
 + * flows of the protocol layer for the hash type. Hash types are either
 + * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 + *
 + * Properties of hashes:
 + *
 + * 1) Two packets in different flows have different hash values
 + * 2) Two packets in the same flow should have the same hash value
 + *
 + * A hash at a higher layer is considered to be more specific. A driver should
 + * set the most specific hash possible.
 + *
 + * A driver cannot indicate a more specific hash than the layer at which a hash
 + * was computed. For instance an L3 hash cannot be set as an L4 hash.
 + *
 + * A driver may indicate a hash level which is less specific than the
 + * actual layer the hash was computed on. For instance, a hash computed
 + * at L4 may be considered an L3 hash. This should only be done if the
 + * driver can't unambiguously determine that the HW computed the hash at
 + * the higher layer. Note that the "should" in the second property above
 + * permits this.
 + */
 +enum pkt_hash_types {
 +	PKT_HASH_TYPE_NONE,	/* Undefined type */
 +	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
 +	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
 +	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
 +};
 +
 +static inline void
 +skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 +{
 +	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
 +	skb->rxhash = hash;
 +}
 +
 +void __skb_get_hash(struct sk_buff *skb);
 +static inline __u32 skb_get_hash(struct sk_buff *skb)
  {
  	if (!skb->l4_rxhash)
 -		__skb_get_rxhash(skb);
 +		__skb_get_hash(skb);
  
  	return skb->rxhash;
  }
  
 +static inline void skb_clear_hash(struct sk_buff *skb)
 +{
 +	skb->rxhash = 0;
 +	skb->l4_rxhash = 0;
 +}
 +
 +static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 +{
 +	if (!skb->l4_rxhash)
 +		skb_clear_hash(skb);
 +}
 +
 +static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 +{
 +	to->rxhash = from->rxhash;
 +	to->l4_rxhash = from->l4_rxhash;
 +};
 +
  #ifdef NET_SKBUFF_DATA_USES_OFFSET
  static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
  {
@@@ -1715,6 -1638,11 +1715,11 @@@ static inline void skb_set_mac_header(s
  	skb->mac_header += offset;
  }
  
+ static inline void skb_pop_mac_header(struct sk_buff *skb)
+ {
+ 	skb->mac_header = skb->network_header;
+ }
+ 
  static inline void skb_probe_transport_header(struct sk_buff *skb,
  					      const int offset_hint)
  {
@@@ -2469,24 -2397,6 +2474,24 @@@ static inline void *skb_header_pointer(
  	return buffer;
  }
  
 +/**
 + *	skb_needs_linearize - check if we need to linearize a given skb
 + *			      depending on the given device features.
 + *	@skb: socket buffer to check
 + *	@features: net device features
 + *
 + *	Returns true if either:
 + *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 + *	2. skb is fragmented and the device does not support SG.
 + */
 +static inline bool skb_needs_linearize(struct sk_buff *skb,
 +				       netdev_features_t features)
 +{
 +	return skb_is_nonlinear(skb) &&
 +	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
 +		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
 +}
 +
  static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
  					     void *to,
  					     const unsigned int len)
@@@ -2621,6 -2531,10 +2626,10 @@@ static inline void sw_tx_timestamp(stru
   * Ethernet MAC Drivers should call this function in their hard_xmit()
   * function immediately before giving the sk_buff to the MAC hardware.
   *
+  * Specifically, one should make absolutely sure that this function is
+  * called before TX completion of this packet can trigger.  Otherwise
+  * the packet could potentially already be freed.
+  *
   * @skb: A socket buffer.
   */
  static inline void skb_tx_timestamp(struct sk_buff *skb)
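To make the hash-type downgrade rule above concrete, here is a hedged sketch
of a driver RX path reporting a hardware RSS hash; the mydrv_rx_desc layout
is invented. If the driver cannot prove the NIC hashed over the 4-tuple it
must report the less specific PKT_HASH_TYPE_L3, and skb_get_hash() will then
fall back to a software recomputation wherever an L4 hash is needed.

/* Illustrative only: the descriptor layout is made up. */
struct mydrv_rx_desc {
	__le32	rss_hash;
	u8	hash_is_l4;	/* set when the NIC hashed the 4-tuple */
};

static void mydrv_rx_set_hash(struct sk_buff *skb,
			      const struct mydrv_rx_desc *rxd)
{
	skb_set_hash(skb, le32_to_cpu(rxd->rss_hash),
		     rxd->hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}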
diff --combined include/net/sctp/structs.h
index 41c7013,0a248b3..e9f732f
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@@ -19,8 -19,9 +19,8 @@@
   * See the GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with GNU CC; see the file COPYING.  If not, write to
 - * the Free Software Foundation, 59 Temple Place - Suite 330,
 - * Boston, MA 02111-1307, USA.
 + * along with GNU CC; see the file COPYING.  If not, see
 + * <http://www.gnu.org/licenses/>.
   *
   * Please send any bug reports or fixes you make to the
   * email addresses:
@@@ -1045,9 -1046,6 +1045,6 @@@ struct sctp_outq 
  
  	/* Corked? */
  	char cork;
- 
- 	/* Is this structure empty?  */
- 	char empty;
  };
  
  void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@@ -1360,6 -1358,12 +1357,6 @@@ struct sctp_association 
  
  	/* This is all information about our peer.  */
  	struct {
 -		/* rwnd
 -		 *
 -		 * Peer Rwnd   : Current calculated value of the peer's rwnd.
 -		 */
 -		__u32 rwnd;
 -
  		/* transport_addr_list
  		 *
  		 * Peer	       : A list of SCTP transport addresses that the
@@@ -1377,12 -1381,6 +1374,12 @@@
  		 */
  		struct list_head transport_addr_list;
  
 +		/* rwnd
 +		 *
 +		 * Peer Rwnd   : Current calculated value of the peer's rwnd.
 +		 */
 +		__u32 rwnd;
 +
  		/* transport_count
  		 *
  		 * Peer        : A count of the number of peer addresses
@@@ -1465,20 -1463,6 +1462,20 @@@
  		 */
  		struct sctp_tsnmap tsn_map;
  
 +		/* This mask is used to disable sending the ASCONF chunk
 +		 * with specified parameter to peer.
 +		 */
 +		__be16 addip_disabled_mask;
 +
 +		/* These are capabilities which our peer advertised.  */
 +		__u8	ecn_capable:1,      /* Can peer do ECN? */
 +			ipv4_address:1,     /* Peer understands IPv4 addresses? */
 +			ipv6_address:1,     /* Peer understands IPv6 addresses? */
 +			hostname_address:1, /* Peer understands DNS addresses? */
 +			asconf_capable:1,   /* Does peer support ADDIP? */
 +			prsctp_capable:1,   /* Can peer do PR-SCTP? */
 +			auth_capable:1;     /* Is peer doing SCTP-AUTH? */
 +
  		/* Ack State   : This flag indicates if the next received
  		 *             : packet is to be responded to with a
  		 *             : SACK. This is initialized to 0.  When a packet
@@@ -1493,11 -1477,25 +1490,11 @@@
  		__u32	sack_cnt;
  		__u32	sack_generation;
  
 -		/* These are capabilities which our peer advertised.  */
 -		__u8	ecn_capable:1,	    /* Can peer do ECN? */
 -			ipv4_address:1,	    /* Peer understands IPv4 addresses? */
 -			ipv6_address:1,	    /* Peer understands IPv6 addresses? */
 -			hostname_address:1, /* Peer understands DNS addresses? */
 -			asconf_capable:1,   /* Does peer support ADDIP? */
 -			prsctp_capable:1,   /* Can peer do PR-SCTP? */
 -			auth_capable:1;	    /* Is peer doing SCTP-AUTH? */
 -
  		__u32   adaptation_ind;	 /* Adaptation Code point. */
  
 -		/* This mask is used to disable sending the ASCONF chunk
 -		 * with specified parameter to peer.
 -		 */
 -		__be16 addip_disabled_mask;
 -
  		struct sctp_inithdr_host i;
 -		int cookie_len;
  		void *cookie;
 +		int cookie_len;
  
  		/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
  		 * C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
@@@ -1529,14 -1527,14 +1526,14 @@@
  	 */
  	sctp_state_t state;
  
 -	/* The cookie life I award for any cookie.  */
 -	ktime_t cookie_life;
 -
  	/* Overall     : The overall association error count.
  	 * Error Count : [Clear this any time I get something.]
  	 */
  	int overall_error_count;
  
 +	/* The cookie life I award for any cookie.  */
 +	ktime_t cookie_life;
 +
  	/* These are the association's initial, max, and min RTO values.
  	 * These values will be initialized by system defaults, but can
  	 * be modified via the SCTP_RTOINFO socket option.
@@@ -1591,9 -1589,10 +1588,9 @@@
  	/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
  	__u32 param_flags;
  
 +	__u32 sackfreq;
  	/* SACK delay timeout */
  	unsigned long sackdelay;
 -	__u32 sackfreq;
 -
  
  	unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
  	struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
@@@ -1601,12 -1600,12 +1598,12 @@@
  	/* Transport to which SHUTDOWN chunk was last sent.  */
  	struct sctp_transport *shutdown_last_sent_to;
  
  	/* Transport to which INIT chunk was last sent.  */
  	struct sctp_transport *init_last_sent_to;
  
 +	/* How many times have we resent a SHUTDOWN */
 +	int shutdown_retries;
 +
  	/* Next TSN    : The next TSN number to be assigned to a new
  	 *	       : DATA chunk.  This is sent in the INIT or INIT
  	 *	       : ACK chunk to the peer and incremented each
@@@ -1811,8 -1810,8 +1808,8 @@@
  	 * after reaching 4294967295.
  	 */
  	__u32 addip_serial;
 -	union sctp_addr *asconf_addr_del_pending;
  	int src_out_of_asoc_ok;
 +	union sctp_addr *asconf_addr_del_pending;
  	struct sctp_transport *new_transport;
  
  	/* SCTP AUTH: list of the endpoint shared keys.  These
diff --combined net/batman-adv/translation-table.c
index 06506e6,ff625fe..19bc42f
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@@ -51,7 -51,7 +51,7 @@@ static int batadv_compare_tt(const stru
  	const void *data1 = container_of(node, struct batadv_tt_common_entry,
  					 hash_entry);
  
 -	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 +	return batadv_compare_eth(data1, data2);
  }
  
  /**
@@@ -333,7 -333,8 +333,8 @@@ static void batadv_tt_local_event(struc
  		return;
  
  	tt_change_node->change.flags = flags;
- 	tt_change_node->change.reserved = 0;
+ 	memset(tt_change_node->change.reserved, 0,
+ 	       sizeof(tt_change_node->change.reserved));
  	memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
  	tt_change_node->change.vid = htons(common->vid);
  
@@@ -2221,7 -2222,8 +2222,8 @@@ static void batadv_tt_tvlv_generate(str
  			       ETH_ALEN);
  			tt_change->flags = tt_common_entry->flags;
  			tt_change->vid = htons(tt_common_entry->vid);
- 			tt_change->reserved = 0;
+ 			memset(tt_change->reserved, 0,
+ 			       sizeof(tt_change->reserved));
  
  			tt_num_entries++;
  			tt_change++;
diff --combined net/core/dev.c
index 153ee2f,4fc1722..e5e23d7
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -480,7 -480,7 +480,7 @@@ EXPORT_SYMBOL(dev_add_offload)
   *	and must not be freed until after all the CPU's have gone
   *	through a quiescent state.
   */
 -void __dev_remove_offload(struct packet_offload *po)
 +static void __dev_remove_offload(struct packet_offload *po)
  {
  	struct list_head *head = &offload_base;
  	struct packet_offload *po1;
@@@ -498,6 -498,7 +498,6 @@@
  out:
  	spin_unlock(&offload_lock);
  }
 -EXPORT_SYMBOL(__dev_remove_offload);
  
  /**
   *	dev_remove_offload	 - remove packet offload handler
@@@ -1565,14 -1566,14 +1565,14 @@@ EXPORT_SYMBOL(unregister_netdevice_noti
   *	are as for raw_notifier_call_chain().
   */
  
 -int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
 -				  struct netdev_notifier_info *info)
 +static int call_netdevice_notifiers_info(unsigned long val,
 +					 struct net_device *dev,
 +					 struct netdev_notifier_info *info)
  {
  	ASSERT_RTNL();
  	netdev_notifier_info_init(info, dev);
  	return raw_notifier_call_chain(&netdev_chain, val, info);
  }
 -EXPORT_SYMBOL(call_netdevice_notifiers_info);
  
  /**
   *	call_netdevice_notifiers - call all network notifier blocks
@@@ -2144,42 -2145,30 +2144,42 @@@ void __netif_schedule(struct Qdisc *q
  }
  EXPORT_SYMBOL(__netif_schedule);
  
 -void dev_kfree_skb_irq(struct sk_buff *skb)
 +struct dev_kfree_skb_cb {
 +	enum skb_free_reason reason;
 +};
 +
 +static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  {
 -	if (atomic_dec_and_test(&skb->users)) {
 -		struct softnet_data *sd;
 -		unsigned long flags;
 +	return (struct dev_kfree_skb_cb *)skb->cb;
 +}
  
 -		local_irq_save(flags);
 -		sd = &__get_cpu_var(softnet_data);
 -		skb->next = sd->completion_queue;
 -		sd->completion_queue = skb;
 -		raise_softirq_irqoff(NET_TX_SOFTIRQ);
 -		local_irq_restore(flags);
 +void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 +{
 +	unsigned long flags;
 +
 +	if (likely(atomic_read(&skb->users) == 1)) {
 +		smp_rmb();
 +		atomic_set(&skb->users, 0);
 +	} else if (likely(!atomic_dec_and_test(&skb->users))) {
 +		return;
  	}
 +	get_kfree_skb_cb(skb)->reason = reason;
 +	local_irq_save(flags);
 +	skb->next = __this_cpu_read(softnet_data.completion_queue);
 +	__this_cpu_write(softnet_data.completion_queue, skb);
 +	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 +	local_irq_restore(flags);
  }
 -EXPORT_SYMBOL(dev_kfree_skb_irq);
 +EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
 -void dev_kfree_skb_any(struct sk_buff *skb)
 +void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  {
  	if (in_irq() || irqs_disabled())
 -		dev_kfree_skb_irq(skb);
 +		__dev_kfree_skb_irq(skb, reason);
  	else
  		dev_kfree_skb(skb);
  }
 -EXPORT_SYMBOL(dev_kfree_skb_any);
 +EXPORT_SYMBOL(__dev_kfree_skb_any);
  
  
  /**
@@@ -2453,8 -2442,13 +2453,8 @@@ static void dev_gso_skb_destructor(stru
  {
  	struct dev_gso_cb *cb;
  
 -	do {
 -		struct sk_buff *nskb = skb->next;
 -
 -		skb->next = nskb->next;
 -		nskb->next = NULL;
 -		kfree_skb(nskb);
 -	} while (skb->next);
 +	kfree_skb_list(skb->next);
 +	skb->next = NULL;
  
  	cb = DEV_GSO_CB(skb);
  	if (cb->destructor)
@@@ -2529,6 -2523,21 +2529,6 @@@ netdev_features_t netif_skb_features(st
  }
  EXPORT_SYMBOL(netif_skb_features);
  
 -/*
 - * Returns true if either:
 - *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 - *	2. skb is fragmented and the device does not support SG.
 - */
 -static inline int skb_needs_linearize(struct sk_buff *skb,
 -				      netdev_features_t features)
 -{
 -	return skb_is_nonlinear(skb) &&
 -			((skb_has_frag_list(skb) &&
 -				!(features & NETIF_F_FRAGLIST)) ||
 -			(skb_shinfo(skb)->nr_frags &&
 -				!(features & NETIF_F_SG)));
 -}
 -
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
  			struct netdev_queue *txq, void *accel_priv)
  {
@@@ -2741,7 -2750,7 +2741,7 @@@ static inline int __dev_xmit_skb(struc
  	return rc;
  }
  
 -#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  static void skb_update_prio(struct sk_buff *skb)
  {
  	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
@@@ -3000,7 -3009,7 +3000,7 @@@ static int get_rps_cpu(struct net_devic
  	}
  
  	skb_reset_network_header(skb);
 -	if (!skb_get_rxhash(skb))
 +	if (!skb_get_hash(skb))
  		goto done;
  
  	flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@@ -3145,7 -3154,7 +3145,7 @@@ static bool skb_flow_limit(struct sk_bu
  	rcu_read_lock();
  	fl = rcu_dereference(sd->flow_limit);
  	if (fl) {
 -		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
 +		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
  		old_flow = fl->history[fl->history_head];
  		fl->history[fl->history_head] = new_flow;
  
@@@ -3297,10 -3306,7 +3297,10 @@@ static void net_tx_action(struct softir
  			clist = clist->next;
  
  			WARN_ON(atomic_read(&skb->users));
 -			trace_kfree_skb(skb, net_tx_action);
 +			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 +				trace_consume_skb(skb);
 +			else
 +				trace_kfree_skb(skb, net_tx_action);
  			__kfree_skb(skb);
  		}
  	}
@@@ -3746,7 -3752,7 +3746,7 @@@ static int napi_gro_complete(struct sk_
  		if (ptype->type != type || !ptype->callbacks.gro_complete)
  			continue;
  
 -		err = ptype->callbacks.gro_complete(skb);
 +		err = ptype->callbacks.gro_complete(skb, 0);
  		break;
  	}
  	rcu_read_unlock();
@@@ -3812,23 -3818,6 +3812,23 @@@ static void gro_list_prepare(struct nap
  	}
  }
  
 +static void skb_gro_reset_offset(struct sk_buff *skb)
 +{
 +	const struct skb_shared_info *pinfo = skb_shinfo(skb);
 +	const skb_frag_t *frag0 = &pinfo->frags[0];
 +
 +	NAPI_GRO_CB(skb)->data_offset = 0;
 +	NAPI_GRO_CB(skb)->frag0 = NULL;
 +	NAPI_GRO_CB(skb)->frag0_len = 0;
 +
 +	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
 +	    pinfo->nr_frags &&
 +	    !PageHighMem(skb_frag_page(frag0))) {
 +		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
 +		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 +	}
 +}
 +
  static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
  	struct sk_buff **pp = NULL;
@@@ -3844,7 -3833,6 +3844,7 @@@
  	if (skb_is_gso(skb) || skb_has_frag_list(skb))
  		goto normal;
  
 +	skb_gro_reset_offset(skb);
  	gro_list_prepare(napi, skb);
  
  	rcu_read_lock();
@@@ -3950,8 -3938,27 +3950,8 @@@ static gro_result_t napi_skb_finish(gro
  	return ret;
  }
  
 -static void skb_gro_reset_offset(struct sk_buff *skb)
 -{
 -	const struct skb_shared_info *pinfo = skb_shinfo(skb);
 -	const skb_frag_t *frag0 = &pinfo->frags[0];
 -
 -	NAPI_GRO_CB(skb)->data_offset = 0;
 -	NAPI_GRO_CB(skb)->frag0 = NULL;
 -	NAPI_GRO_CB(skb)->frag0_len = 0;
 -
 -	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
 -	    pinfo->nr_frags &&
 -	    !PageHighMem(skb_frag_page(frag0))) {
 -		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
 -		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 -	}
 -}
 -
  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
 -	skb_gro_reset_offset(skb);
 -
  	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  }
  EXPORT_SYMBOL(napi_gro_receive);
@@@ -3974,7 -3981,8 +3974,7 @@@ struct sk_buff *napi_get_frags(struct n
  
  	if (!skb) {
  		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
 -		if (skb)
 -			napi->skb = skb;
 +		napi->skb = skb;
  	}
  	return skb;
  }
@@@ -3985,7 -3993,12 +3985,7 @@@ static gro_result_t napi_frags_finish(s
  {
  	switch (ret) {
  	case GRO_NORMAL:
 -	case GRO_HELD:
 -		skb->protocol = eth_type_trans(skb, skb->dev);
 -
 -		if (ret == GRO_HELD)
 -			skb_gro_pull(skb, -ETH_HLEN);
 -		else if (netif_receive_skb(skb))
 +		if (netif_receive_skb(skb))
  			ret = GRO_DROP;
  		break;
  
@@@ -3994,7 -4007,6 +3994,7 @@@
  		napi_reuse_skb(napi, skb);
  		break;
  
 +	case GRO_HELD:
  	case GRO_MERGED:
  		break;
  	}
@@@ -4005,15 -4017,36 +4005,15 @@@
  static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  {
  	struct sk_buff *skb = napi->skb;
  
  	napi->skb = NULL;
  
 -	skb_reset_mac_header(skb);
 -	skb_gro_reset_offset(skb);
 -
 -	off = skb_gro_offset(skb);
 -	hlen = off + sizeof(*eth);
 -	eth = skb_gro_header_fast(skb, off);
 -	if (skb_gro_header_hard(skb, hlen)) {
 -		eth = skb_gro_header_slow(skb, hlen, off);
 -		if (unlikely(!eth)) {
 -			napi_reuse_skb(napi, skb);
 -			skb = NULL;
 -			goto out;
 -		}
 +	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
 +		napi_reuse_skb(napi, skb);
 +		return NULL;
  	}
 +	skb->protocol = eth_type_trans(skb, skb->dev);
  
 -	skb_gro_pull(skb, sizeof(*eth));
 -
 -	/*
 -	 * This works because the only protocols we care about don't require
 -	 * special handling.  We'll fix it up properly at the end.
 -	 */
 -	skb->protocol = eth->h_proto;
 -
 -out:
  	return skb;
  }
  
@@@ -4029,7 -4062,7 +4029,7 @@@ gro_result_t napi_gro_frags(struct napi
  EXPORT_SYMBOL(napi_gro_frags);
  
  /*
 - * net_rps_action sends any pending IPI's for rps.
 + * net_rps_action_and_irq_enable sends any pending IPIs for rps.
   * Note: called with local irq disabled, but exits with local irq enabled.
   */
  static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@@ -4234,10 -4267,17 +4234,10 @@@ EXPORT_SYMBOL(netif_napi_add)
  
  void netif_napi_del(struct napi_struct *napi)
  {
  	list_del_init(&napi->dev_list);
  	napi_free_frags(napi);
  
 -	for (skb = napi->gro_list; skb; skb = next) {
 -		next = skb->next;
 -		skb->next = NULL;
 -		kfree_skb(skb);
 -	}
 -
 +	kfree_skb_list(napi->gro_list);
  	napi->gro_list = NULL;
  	napi->gro_count = 0;
  }
@@@ -4354,6 -4394,19 +4354,6 @@@ struct netdev_adjacent 
  	struct rcu_head rcu;
  };
  
 -static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
 -						     struct net_device *adj_dev,
 -						     struct list_head *adj_list)
 -{
 -	struct netdev_adjacent *adj;
 -
 -	list_for_each_entry_rcu(adj, adj_list, list) {
 -		if (adj->dev == adj_dev)
 -			return adj;
 -	}
 -	return NULL;
 -}
 -
  static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
  						 struct net_device *adj_dev,
  						 struct list_head *adj_list)
@@@ -4392,12 -4445,13 +4392,12 @@@ EXPORT_SYMBOL(netdev_has_upper_dev)
   * Find out if a device is linked to an upper device and return true in case
   * it is. The caller must hold the RTNL lock.
   */
 -bool netdev_has_any_upper_dev(struct net_device *dev)
 +static bool netdev_has_any_upper_dev(struct net_device *dev)
  {
  	ASSERT_RTNL();
  
  	return !list_empty(&dev->all_adj_list.upper);
  }
 -EXPORT_SYMBOL(netdev_has_any_upper_dev);
  
  /**
   * netdev_master_upper_dev_get - Get master upper device
@@@ -4446,7 -4500,7 +4446,7 @@@ struct net_device *netdev_all_upper_get
  {
  	struct netdev_adjacent *upper;
  
- 	WARN_ON_ONCE(!rcu_read_lock_held());
+ 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  
  	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  
@@@ -4517,27 -4571,6 +4517,27 @@@ void *netdev_lower_get_next_private_rcu
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
  /**
 + * netdev_lower_get_first_private_rcu - Get the first ->private from the
 + *				       lower neighbour list, RCU
 + *				       variant
 + * @dev: device
 + *
 + * Gets the first netdev_adjacent->private from the dev's lower neighbour
 + * list. The caller must hold RCU read lock.
 + */
 +void *netdev_lower_get_first_private_rcu(struct net_device *dev)
 +{
 +	struct netdev_adjacent *lower;
 +
 +	lower = list_first_or_null_rcu(&dev->adj_list.lower,
 +			struct netdev_adjacent, list);
 +	if (lower)
 +		return lower->private;
 +	return NULL;
 +}
 +EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
 +
 +/**
   * netdev_master_upper_dev_get_rcu - Get master upper device
   * @dev: device
   *
@@@ -4629,9 -4662,9 +4629,9 @@@ free_adj
  	return ret;
  }
  
 -void __netdev_adjacent_dev_remove(struct net_device *dev,
 -				  struct net_device *adj_dev,
 -				  struct list_head *dev_list)
 +static void __netdev_adjacent_dev_remove(struct net_device *dev,
 +					 struct net_device *adj_dev,
 +					 struct list_head *dev_list)
  {
  	struct netdev_adjacent *adj;
  	char linkname[IFNAMSIZ+7];
@@@ -4669,11 -4702,11 +4669,11 @@@
  	kfree_rcu(adj, rcu);
  }
  
 -int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 -				     struct net_device *upper_dev,
 -				     struct list_head *up_list,
 -				     struct list_head *down_list,
 -				     void *private, bool master)
 +static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 +					    struct net_device *upper_dev,
 +					    struct list_head *up_list,
 +					    struct list_head *down_list,
 +					    void *private, bool master)
  {
  	int ret;
  
@@@ -4692,8 -4725,8 +4692,8 @@@
  	return 0;
  }
  
 -int __netdev_adjacent_dev_link(struct net_device *dev,
 -			       struct net_device *upper_dev)
 +static int __netdev_adjacent_dev_link(struct net_device *dev,
 +				      struct net_device *upper_dev)
  {
  	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
  						&dev->all_adj_list.upper,
@@@ -4701,26 -4734,26 +4701,26 @@@
  						NULL, false);
  }
  
 -void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 -					struct net_device *upper_dev,
 -					struct list_head *up_list,
 -					struct list_head *down_list)
 +static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 +					       struct net_device *upper_dev,
 +					       struct list_head *up_list,
 +					       struct list_head *down_list)
  {
  	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
  	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
  }
  
 -void __netdev_adjacent_dev_unlink(struct net_device *dev,
 -				  struct net_device *upper_dev)
 +static void __netdev_adjacent_dev_unlink(struct net_device *dev,
 +					 struct net_device *upper_dev)
  {
  	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
  					   &dev->all_adj_list.upper,
  					   &upper_dev->all_adj_list.lower);
  }
  
 -int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 -					 struct net_device *upper_dev,
 -					 void *private, bool master)
 +static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 +						struct net_device *upper_dev,
 +						void *private, bool master)
  {
  	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
  
@@@ -4739,8 -4772,8 +4739,8 @@@
  	return 0;
  }
  
 -void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 -					    struct net_device *upper_dev)
 +static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 +						   struct net_device *upper_dev)
  {
  	__netdev_adjacent_dev_unlink(dev, upper_dev);
  	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@@ -4929,6 -4962,21 +4929,6 @@@ void netdev_upper_dev_unlink(struct net
  }
  EXPORT_SYMBOL(netdev_upper_dev_unlink);
  
 -void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
 -				       struct net_device *lower_dev)
 -{
 -	struct netdev_adjacent *lower;
 -
 -	if (!lower_dev)
 -		return NULL;
 -	lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
 -	if (!lower)
 -		return NULL;
 -
 -	return lower->private;
 -}
 -EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
 -
  void *netdev_lower_dev_get_private(struct net_device *dev,
  				   struct net_device *lower_dev)
  {
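skb_needs_linearize() moves out of net/core/dev.c here and into
include/linux/skbuff.h (see the skbuff.h hunk above), presumably so drivers
can use it as well. A hedged sketch of a driver xmit path using it; mydrv is
invented, and dropping on linearization failure is one policy choice among
several.

/* Illustrative only: mydrv is a made-up driver. */
static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	/* Hardware without SG/FRAGLIST support needs one flat buffer. */
	if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* ... DMA-map and queue the now-linear skb ... */
	return NETDEV_TX_OK;
}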
diff --combined net/core/neighbour.c
index a666740,932c6d7..ea97361
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@@ -38,8 -38,6 +38,8 @@@
  #include <linux/random.h>
  #include <linux/string.h>
  #include <linux/log2.h>
 +#include <linux/inetdevice.h>
 +#include <net/addrconf.h>
  
  #define DEBUG
  #define NEIGH_DEBUG 1
@@@ -499,7 -497,7 +499,7 @@@ struct neighbour *__neigh_create(struc
  		goto out_neigh_release;
  	}
  
 -	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
 +	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
  
  	write_lock_bh(&tbl->lock);
  	nht = rcu_dereference_protected(tbl->nht,
@@@ -778,7 -776,7 +778,7 @@@ static void neigh_periodic_work(struct 
  		tbl->last_rand = jiffies;
  		for (p = &tbl->parms; p; p = p->next)
  			p->reachable_time =
 -				neigh_rand_reach_time(p->base_reachable_time);
 +				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
  	}
  
  	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@@ -801,7 -799,7 +801,7 @@@
  
  			if (atomic_read(&n->refcnt) == 1 &&
  			    (state == NUD_FAILED ||
 -			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
 +			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
  				*np = n->next;
  				n->dead = 1;
  				write_unlock(&n->lock);
@@@ -824,12 -822,12 +824,12 @@@ next_elt
  						lockdep_is_held(&tbl->lock));
  	}
  out:
 -	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 -	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 -	 * base_reachable_time.
 +	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 +	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 +	 * BASE_REACHABLE_TIME.
  	 */
  	schedule_delayed_work(&tbl->gc_work,
 -			      tbl->parms.base_reachable_time >> 1);
 +			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
  	write_unlock_bh(&tbl->lock);
  }
  
@@@ -837,9 -835,8 +837,9 @@@ static __inline__ int neigh_max_probes(
  {
  	struct neigh_parms *p = n->parms;
  	return (n->nud_state & NUD_PROBE) ?
 -		p->ucast_probes :
 -		p->ucast_probes + p->app_probes + p->mcast_probes;
 +		NEIGH_VAR(p, UCAST_PROBES) :
 +		NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 +		NEIGH_VAR(p, MCAST_PROBES);
  }
  
  static void neigh_invalidate(struct neighbour *neigh)
@@@ -904,13 -901,12 +904,13 @@@ static void neigh_timer_handler(unsigne
  			neigh_dbg(2, "neigh %p is still alive\n", neigh);
  			next = neigh->confirmed + neigh->parms->reachable_time;
  		} else if (time_before_eq(now,
 -					  neigh->used + neigh->parms->delay_probe_time)) {
 +					  neigh->used +
 +					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
  			neigh_dbg(2, "neigh %p is delayed\n", neigh);
  			neigh->nud_state = NUD_DELAY;
  			neigh->updated = jiffies;
  			neigh_suspect(neigh);
 -			next = now + neigh->parms->delay_probe_time;
 +			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
  		} else {
  			neigh_dbg(2, "neigh %p is suspected\n", neigh);
  			neigh->nud_state = NUD_STALE;
@@@ -920,8 -916,7 +920,8 @@@
  		}
  	} else if (state & NUD_DELAY) {
  		if (time_before_eq(now,
 -				   neigh->confirmed + neigh->parms->delay_probe_time)) {
 +				   neigh->confirmed +
 +				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
  			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
  			neigh->nud_state = NUD_REACHABLE;
  			neigh->updated = jiffies;
@@@ -933,11 -928,11 +933,11 @@@
  			neigh->nud_state = NUD_PROBE;
  			neigh->updated = jiffies;
  			atomic_set(&neigh->probes, 0);
 -			next = now + neigh->parms->retrans_time;
 +			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
  		}
  	} else {
  		/* NUD_PROBE|NUD_INCOMPLETE */
 -		next = now + neigh->parms->retrans_time;
 +		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
  	}
  
  	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@@ -978,16 -973,13 +978,16 @@@ int __neigh_event_send(struct neighbou
  		goto out_unlock_bh;
  
  	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 -		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
 +		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
 +		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
  			unsigned long next, now = jiffies;
  
 -			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
 +			atomic_set(&neigh->probes,
 +				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
  			neigh->nud_state     = NUD_INCOMPLETE;
  			neigh->updated = now;
 -			next = now + max(neigh->parms->retrans_time, HZ/2);
 +			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
 +					 HZ/2);
  			neigh_add_timer(neigh, next);
  			immediate_probe = true;
  		} else {
@@@ -1002,14 -994,14 +1002,14 @@@
  		neigh_dbg(2, "neigh %p is delayed\n", neigh);
  		neigh->nud_state = NUD_DELAY;
  		neigh->updated = jiffies;
 -		neigh_add_timer(neigh,
 -				jiffies + neigh->parms->delay_probe_time);
 +		neigh_add_timer(neigh, jiffies +
 +				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
  	}
  
  	if (neigh->nud_state == NUD_INCOMPLETE) {
  		if (skb) {
  			while (neigh->arp_queue_len_bytes + skb->truesize >
 -			       neigh->parms->queue_len_bytes) {
 +			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
  				struct sk_buff *buff;
  
  				buff = __skb_dequeue(&neigh->arp_queue);
@@@ -1179,7 -1171,7 +1179,7 @@@ int neigh_update(struct neighbour *neig
  		neigh_update_hhs(neigh);
  		if (!(new & NUD_CONNECTED))
  			neigh->confirmed = jiffies -
 -				      (neigh->parms->base_reachable_time << 1);
 +				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
  		notify = 1;
  	}
  	if (new == old)
@@@ -1239,21 -1231,6 +1239,21 @@@ out
  }
  EXPORT_SYMBOL(neigh_update);
  
 +/* Update the neigh to listen temporarily for probe responses, even if it is
 + * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 + */
 +void __neigh_set_probe_once(struct neighbour *neigh)
 +{
 +	neigh->updated = jiffies;
 +	if (!(neigh->nud_state & NUD_FAILED))
 +		return;
 +	neigh->nud_state = NUD_PROBE;
 +	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
 +	neigh_add_timer(neigh,
 +			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 +}
 +EXPORT_SYMBOL(__neigh_set_probe_once);
 +
  struct neighbour *neigh_event_ns(struct neigh_table *tbl,
  				 u8 *lladdr, void *saddr,
  				 struct net_device *dev)
@@@ -1298,7 -1275,7 +1298,7 @@@ int neigh_compat_output(struct neighbou
  
  	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
  			    skb->len) < 0 &&
- 	    dev->header_ops->rebuild(skb))
+ 	    dev_rebuild_header(skb))
  		return 0;
  
  	return dev_queue_xmit(skb);
@@@ -1415,10 -1392,9 +1415,10 @@@ void pneigh_enqueue(struct neigh_table 
  		    struct sk_buff *skb)
  {
  	unsigned long now = jiffies;
 -	unsigned long sched_next = now + (net_random() % p->proxy_delay);
 +	unsigned long sched_next = now + (net_random() %
 +					  NEIGH_VAR(p, PROXY_DELAY));
  
 -	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
 +	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
  		kfree_skb(skb);
  		return;
  	}
@@@ -1465,7 -1441,7 +1465,7 @@@ struct neigh_parms *neigh_parms_alloc(s
  		p->tbl		  = tbl;
  		atomic_set(&p->refcnt, 1);
  		p->reachable_time =
 -				neigh_rand_reach_time(p->base_reachable_time);
 +				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
  		dev_hold(dev);
  		p->dev = dev;
  		write_pnet(&p->net, hold_net(net));
@@@ -1482,8 -1458,6 +1482,8 @@@
  		p->next		= tbl->parms.next;
  		tbl->parms.next = p;
  		write_unlock_bh(&tbl->lock);
 +
 +		neigh_parms_data_state_cleanall(p);
  	}
  	return p;
  }
@@@ -1536,7 -1510,7 +1536,7 @@@ static void neigh_table_init_no_netlink
  	write_pnet(&tbl->parms.net, &init_net);
  	atomic_set(&tbl->parms.refcnt, 1);
  	tbl->parms.reachable_time =
 -			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
 +			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
  
  	tbl->stats = alloc_percpu(struct neigh_statistics);
  	if (!tbl->stats)
@@@ -1804,32 -1778,24 +1804,32 @@@ static int neightbl_fill_parms(struct s
  	if ((parms->dev &&
  	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
  	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
 -	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
 +	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
 +			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
  	    /* approximative value for deprecated QUEUE_LEN (in packets) */
  	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
 -			parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
 -	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
 -	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
 -	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
 -	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
 +			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
 +	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
 +	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
 +	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
 +			NEIGH_VAR(parms, UCAST_PROBES)) ||
 +	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
 +			NEIGH_VAR(parms, MCAST_PROBES)) ||
  	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
  	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
 -			  parms->base_reachable_time) ||
 -	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
 +			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
 +	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
 +			  NEIGH_VAR(parms, GC_STALETIME)) ||
  	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
 -			  parms->delay_probe_time) ||
 -	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
 -	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
 -	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
 -	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
 +			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
 +	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
 +			  NEIGH_VAR(parms, RETRANS_TIME)) ||
 +	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
 +			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
 +	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
 +			  NEIGH_VAR(parms, PROXY_DELAY)) ||
 +	    nla_put_msecs(skb, NDTPA_LOCKTIME,
 +			  NEIGH_VAR(parms, LOCKTIME)))
  		goto nla_put_failure;
  	return nla_nest_end(skb, nest);
  
@@@ -2045,54 -2011,44 +2045,54 @@@ static int neightbl_set(struct sk_buff 
  
  			switch (i) {
  			case NDTPA_QUEUE_LEN:
 -				p->queue_len_bytes = nla_get_u32(tbp[i]) *
 -						     SKB_TRUESIZE(ETH_FRAME_LEN);
 +				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
 +					      nla_get_u32(tbp[i]) *
 +					      SKB_TRUESIZE(ETH_FRAME_LEN));
  				break;
  			case NDTPA_QUEUE_LENBYTES:
 -				p->queue_len_bytes = nla_get_u32(tbp[i]);
 +				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
 +					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_PROXY_QLEN:
 -				p->proxy_qlen = nla_get_u32(tbp[i]);
 +				NEIGH_VAR_SET(p, PROXY_QLEN,
 +					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_APP_PROBES:
 -				p->app_probes = nla_get_u32(tbp[i]);
 +				NEIGH_VAR_SET(p, APP_PROBES,
 +					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_UCAST_PROBES:
 -				p->ucast_probes = nla_get_u32(tbp[i]);
 +				NEIGH_VAR_SET(p, UCAST_PROBES,
 +					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_MCAST_PROBES:
 -				p->mcast_probes = nla_get_u32(tbp[i]);
 +				NEIGH_VAR_SET(p, MCAST_PROBES,
 +					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_BASE_REACHABLE_TIME:
 -				p->base_reachable_time = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
 +					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_GC_STALETIME:
 -				p->gc_staletime = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, GC_STALETIME,
 +					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_DELAY_PROBE_TIME:
 -				p->delay_probe_time = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
 +					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_RETRANS_TIME:
 -				p->retrans_time = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, RETRANS_TIME,
 +					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_ANYCAST_DELAY:
 -				p->anycast_delay = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, ANYCAST_DELAY, nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_PROXY_DELAY:
 -				p->proxy_delay = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, PROXY_DELAY, nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_LOCKTIME:
 -				p->locktime = nla_get_msecs(tbp[i]);
 +				NEIGH_VAR_SET(p, LOCKTIME, nla_get_msecs(tbp[i]));
  				break;
  			}
  		}
@@@ -2833,167 -2789,133 +2833,167 @@@ static int proc_unres_qlen(struct ctl_t
  	return ret;
  }
  
 -enum {
 -	NEIGH_VAR_MCAST_PROBE,
 -	NEIGH_VAR_UCAST_PROBE,
 -	NEIGH_VAR_APP_PROBE,
 -	NEIGH_VAR_RETRANS_TIME,
 -	NEIGH_VAR_BASE_REACHABLE_TIME,
 -	NEIGH_VAR_DELAY_PROBE_TIME,
 -	NEIGH_VAR_GC_STALETIME,
 -	NEIGH_VAR_QUEUE_LEN,
 -	NEIGH_VAR_QUEUE_LEN_BYTES,
 -	NEIGH_VAR_PROXY_QLEN,
 -	NEIGH_VAR_ANYCAST_DELAY,
 -	NEIGH_VAR_PROXY_DELAY,
 -	NEIGH_VAR_LOCKTIME,
 -	NEIGH_VAR_RETRANS_TIME_MS,
 -	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
 -	NEIGH_VAR_GC_INTERVAL,
 -	NEIGH_VAR_GC_THRESH1,
 -	NEIGH_VAR_GC_THRESH2,
 -	NEIGH_VAR_GC_THRESH3,
 -	NEIGH_VAR_MAX
 -};
 +static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
 +						   int family)
 +{
 +	switch (family) {
 +	case AF_INET:
 +		return __in_dev_arp_parms_get_rcu(dev);
 +	case AF_INET6:
 +		return __in6_dev_nd_parms_get_rcu(dev);
 +	}
 +	return NULL;
 +}
 +
 +static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
 +				  int index)
 +{
 +	struct net_device *dev;
 +	int family = neigh_parms_family(p);
 +
 +	rcu_read_lock();
 +	for_each_netdev_rcu(net, dev) {
 +		struct neigh_parms *dst_p =
 +				neigh_get_dev_parms_rcu(dev, family);
 +
 +		if (dst_p && !test_bit(index, dst_p->data_state))
 +			dst_p->data[index] = p->data[index];
 +	}
 +	rcu_read_unlock();
 +}
 +
 +static void neigh_proc_update(struct ctl_table *ctl, int write)
 +{
 +	struct net_device *dev = ctl->extra1;
 +	struct neigh_parms *p = ctl->extra2;
 +	struct net *net = neigh_parms_net(p);
 +	int index = (int *) ctl->data - p->data;
 +
 +	if (!write)
 +		return;
 +
 +	set_bit(index, p->data_state);
 +	if (!dev) /* NULL dev means this is a default value */
 +		neigh_copy_dflt_parms(net, p, index);
 +}
 +
 +static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
 +					   void __user *buffer,
 +					   size_t *lenp, loff_t *ppos)
 +{
 +	struct ctl_table tmp = *ctl;
 +	int ret;
 +
 +	tmp.extra1 = &zero;
 +	tmp.extra2 = &int_max;
 +
 +	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +
 +int neigh_proc_dointvec(struct ctl_table *ctl, int write,
 +			void __user *buffer, size_t *lenp, loff_t *ppos)
 +{
 +	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 +
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +EXPORT_SYMBOL(neigh_proc_dointvec);
 +
 +int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
 +				void __user *buffer,
 +				size_t *lenp, loff_t *ppos)
 +{
 +	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
 +
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
 +
 +static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
 +					      void __user *buffer,
 +					      size_t *lenp, loff_t *ppos)
 +{
 +	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
 +
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +
 +int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
 +				   void __user *buffer,
 +				   size_t *lenp, loff_t *ppos)
 +{
 +	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
 +
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
 +
 +static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
 +					  void __user *buffer,
 +					  size_t *lenp, loff_t *ppos)
 +{
 +	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
 +
 +	neigh_proc_update(ctl, write);
 +	return ret;
 +}
 +
 +#define NEIGH_PARMS_DATA_OFFSET(index)	\
 +	(&((struct neigh_parms *) 0)->data[index])
 +
 +#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
 +	[NEIGH_VAR_ ## attr] = { \
 +		.procname	= name, \
 +		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
 +		.maxlen		= sizeof(int), \
 +		.mode		= mval, \
 +		.proc_handler	= proc, \
 +	}
 +
 +#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
 +
 +#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
 +
 +#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
 +
 +#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
 +
 +#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
 +
 +#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
 +	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
  
  static struct neigh_sysctl_table {
  	struct ctl_table_header *sysctl_header;
  	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
  } neigh_sysctl_template __read_mostly = {
  	.neigh_vars = {
 -		[NEIGH_VAR_MCAST_PROBE] = {
 -			.procname	= "mcast_solicit",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.extra1 	= &zero,
 -			.extra2		= &int_max,
 -			.proc_handler	= proc_dointvec_minmax,
 -		},
 -		[NEIGH_VAR_UCAST_PROBE] = {
 -			.procname	= "ucast_solicit",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.extra1 	= &zero,
 -			.extra2		= &int_max,
 -			.proc_handler	= proc_dointvec_minmax,
 -		},
 -		[NEIGH_VAR_APP_PROBE] = {
 -			.procname	= "app_solicit",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.extra1 	= &zero,
 -			.extra2		= &int_max,
 -			.proc_handler	= proc_dointvec_minmax,
 -		},
 -		[NEIGH_VAR_RETRANS_TIME] = {
 -			.procname	= "retrans_time",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_userhz_jiffies,
 -		},
 -		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
 -			.procname	= "base_reachable_time",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_jiffies,
 -		},
 -		[NEIGH_VAR_DELAY_PROBE_TIME] = {
 -			.procname	= "delay_first_probe_time",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_jiffies,
 -		},
 -		[NEIGH_VAR_GC_STALETIME] = {
 -			.procname	= "gc_stale_time",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_jiffies,
 -		},
 -		[NEIGH_VAR_QUEUE_LEN] = {
 -			.procname	= "unres_qlen",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_unres_qlen,
 -		},
 -		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
 -			.procname	= "unres_qlen_bytes",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.extra1		= &zero,
 -			.proc_handler   = proc_dointvec_minmax,
 -		},
 -		[NEIGH_VAR_PROXY_QLEN] = {
 -			.procname	= "proxy_qlen",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.extra1 	= &zero,
 -			.extra2		= &int_max,
 -			.proc_handler	= proc_dointvec_minmax,
 -		},
 -		[NEIGH_VAR_ANYCAST_DELAY] = {
 -			.procname	= "anycast_delay",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_userhz_jiffies,
 -		},
 -		[NEIGH_VAR_PROXY_DELAY] = {
 -			.procname	= "proxy_delay",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_userhz_jiffies,
 -		},
 -		[NEIGH_VAR_LOCKTIME] = {
 -			.procname	= "locktime",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_userhz_jiffies,
 -		},
 -		[NEIGH_VAR_RETRANS_TIME_MS] = {
 -			.procname	= "retrans_time_ms",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_ms_jiffies,
 -		},
 -		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
 -			.procname	= "base_reachable_time_ms",
 -			.maxlen		= sizeof(int),
 -			.mode		= 0644,
 -			.proc_handler	= proc_dointvec_ms_jiffies,
 -		},
 +		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
 +		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
 +		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
 +		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
 +		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
 +		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
 +		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
 +		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
 +		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
 +		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
 +		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
 +		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
 +		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
 +		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
 +		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
  		[NEIGH_VAR_GC_INTERVAL] = {
  			.procname	= "gc_interval",
  			.maxlen		= sizeof(int),
@@@ -3029,23 -2951,31 +3029,23 @@@
  };
  
  int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 -			  char *p_name, proc_handler *handler)
 +			  proc_handler *handler)
  {
 +	int i;
  	struct neigh_sysctl_table *t;
 -	const char *dev_name_source = NULL;
 +	const char *dev_name_source;
  	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
 +	char *p_name;
  
  	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
  	if (!t)
  		goto err;
  
 -	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
 -	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
 -	t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
 -	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
 -	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
 -	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
 -	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
 -	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
 -	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
 -	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
 -	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
 -	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
 -	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
 -	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
 -	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
 +	for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
 +		t->neigh_vars[i].data += (long) p;
 +		t->neigh_vars[i].extra1 = dev;
 +		t->neigh_vars[i].extra2 = p;
 +	}
  
  	if (dev) {
  		dev_name_source = dev->name;
@@@ -3060,32 -2990,26 +3060,32 @@@
  		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
  	}
  
  	if (handler) {
  		/* RetransTime */
  		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
 -		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
  		/* ReachableTime */
  		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
 -		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
  		/* RetransTime (in milliseconds) */
  		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
 -		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
  		/* ReachableTime (in milliseconds) */
  		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
 -		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
  	}
  
  	/* Don't export sysctls to unprivileged users */
  	if (neigh_parms_net(p)->user_ns != &init_user_ns)
  		t->neigh_vars[0].procname = NULL;
  
 +	switch (neigh_parms_family(p)) {
 +	case AF_INET:
 +	      p_name = "ipv4";
 +	      break;
 +	case AF_INET6:
 +	      p_name = "ipv6";
 +	      break;
 +	default:
 +	      BUG();
 +	}
 +
  	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
  		p_name, dev_name_source);
  	t->sysctl_header =
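
The neighbour.c hunks above replace the individual neigh_parms fields (retrans_time, gc_staletime, ...) with a single data[] array addressed through NEIGH_VAR()/NEIGH_VAR_SET(), plus a data_state bitmap recording which slots were explicitly written, so that a change to the default parms propagates only to devices that never overrode the value. A minimal user-space sketch of that pattern — simplified names and types, not the kernel's actual definitions:

/* Sketch of the NEIGH_VAR data-array pattern: one int array plus an
 * "explicitly set" bitmap, so defaults are copied only into slots the
 * administrator never touched.  Illustrative, not kernel code. */
#include <stdio.h>
#include <string.h>

enum { VAR_RETRANS_TIME, VAR_BASE_REACHABLE_TIME, VAR_MAX };

struct parms {
	int data[VAR_MAX];
	unsigned long data_state;	/* bit i set => data[i] written */
};

#define VAR(p, i)	((p)->data[i])
#define VAR_SET(p, i, v) \
	do { (p)->data[i] = (v); (p)->data_state |= 1UL << (i); } while (0)

/* Copy a default value into every parms block that did not override it */
static void copy_dflt(struct parms *dflt, struct parms *devs, int ndev, int i)
{
	int d;

	for (d = 0; d < ndev; d++)
		if (!(devs[d].data_state & (1UL << i)))
			devs[d].data[i] = dflt->data[i];
}

int main(void)
{
	struct parms dflt = { { 100, 300 }, 0 }, dev[2];

	memset(dev, 0, sizeof(dev));
	VAR_SET(&dev[1], VAR_RETRANS_TIME, 50);	/* per-device override */
	copy_dflt(&dflt, dev, 2, VAR_RETRANS_TIME);
	printf("%d %d\n", VAR(&dev[0], VAR_RETRANS_TIME),
	       VAR(&dev[1], VAR_RETRANS_TIME));	/* prints: 100 50 */
	return 0;
}

The offsetof-style NEIGH_PARMS_DATA_OFFSET trick in the sysctl template then lets one static table be rebased onto any parms instance simply by adding the instance pointer, which is exactly what the ARRAY_SIZE loop in neigh_sysctl_register() does.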
diff --combined net/ipv4/udp.c
index d5d24ec,a7e4729..80f649f
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -986,7 -986,7 +986,7 @@@ int udp_sendmsg(struct kiocb *iocb, str
  		fl4 = &fl4_stack;
  		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
  				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 -				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
 +				   inet_sk_flowi_flags(sk),
  				   faddr, saddr, dport, inet->inet_sport);
  
  		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
@@@ -2478,6 -2478,7 +2478,7 @@@ struct sk_buff *skb_udp_tunnel_segment(
  				       netdev_features_t features)
  {
  	struct sk_buff *segs = ERR_PTR(-EINVAL);
+ 	u16 mac_offset = skb->mac_header;
  	int mac_len = skb->mac_len;
  	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
  	__be16 protocol = skb->protocol;
@@@ -2497,8 -2498,11 +2498,11 @@@
  	/* segment inner packet. */
  	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
  	segs = skb_mac_gso_segment(skb, enc_features);
- 	if (!segs || IS_ERR(segs))
+ 	if (!segs || IS_ERR(segs)) {
+ 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+ 				     mac_len);
  		goto out;
+ 	}
  
  	outer_hlen = skb_tnl_header_len(skb);
  	skb = segs;
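
The udp.c hunk keys its error handling off the `!segs || IS_ERR(segs)` pair, i.e. the kernel convention of encoding a small negative errno inside an otherwise invalid pointer value; on either outcome skb_gso_error_unwind() restores the mac header state saved in mac_offset/mac_len before segmentation was attempted. A self-contained user-space rendition of that pointer convention (the kernel's helpers live in include/linux/err.h and behave essentially like this, though the exact definitions may differ):

/* ERR_PTR convention: the top MAX_ERRNO addresses are never valid
 * pointers, so an errno can ride in the pointer itself. */
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *segment(int fail)
{
	if (fail)
		return ERR_PTR(-22);	/* -EINVAL */
	return NULL;			/* "nothing produced" is also possible */
}

int main(void)
{
	void *segs = segment(1);

	if (!segs || IS_ERR(segs))	/* both cases take the unwind path */
		printf("unwind, err=%ld\n", segs ? PTR_ERR(segs) : 0L);
	return 0;
}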
diff --combined net/ipv6/addrconf.c
index 6c16345,f62c72b..31f75ea
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@@ -442,8 -442,6 +442,8 @@@ static int inet6_netconf_msgsize_devcon
  	if (type == -1 || type == NETCONFA_MC_FORWARDING)
  		size += nla_total_size(4);
  #endif
 +	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
 +		size += nla_total_size(4);
  
  	return size;
  }
@@@ -477,10 -475,6 +477,10 @@@ static int inet6_netconf_fill_devconf(s
  			devconf->mc_forwarding) < 0)
  		goto nla_put_failure;
  #endif
 +	if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
 +	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
 +		goto nla_put_failure;
 +
  	return nlmsg_end(skb, nlh);
  
  nla_put_failure:
@@@ -515,7 -509,6 +515,7 @@@ errout
  static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
  	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
  	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
 +	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
  };
  
  static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@@ -841,8 -834,6 +841,8 @@@ ipv6_add_addr(struct inet6_dev *idev, c
  		goto out;
  	}
  
 +	neigh_parms_data_state_setall(idev->nd_parms);
 +
  	ifa->addr = *addr;
  	if (peer_addr)
  		ifa->peer_addr = *peer_addr;
@@@ -995,9 -986,12 +995,9 @@@ static void ipv6_del_addr(struct inet6_
  	 * --yoshfuji
  	 */
  	if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
 -		struct in6_addr prefix;
  		struct rt6_info *rt;
  
 -		ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
 -
 -		rt = addrconf_get_prefix_route(&prefix,
 +		rt = addrconf_get_prefix_route(&ifp->addr,
  					       ifp->prefix_len,
  					       ifp->idev->dev,
  					       0, RTF_GATEWAY | RTF_DEFAULT);
@@@ -1030,7 -1024,7 +1030,7 @@@ static int ipv6_create_tempaddr(struct 
  	u32 addr_flags;
  	unsigned long now = jiffies;
  
 -	write_lock(&idev->lock);
 +	write_lock_bh(&idev->lock);
  	if (ift) {
  		spin_lock_bh(&ift->lock);
  		memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
@@@ -1042,7 -1036,7 +1042,7 @@@
  retry:
  	in6_dev_hold(idev);
  	if (idev->cnf.use_tempaddr <= 0) {
 -		write_unlock(&idev->lock);
 +		write_unlock_bh(&idev->lock);
  		pr_info("%s: use_tempaddr is disabled\n", __func__);
  		in6_dev_put(idev);
  		ret = -1;
@@@ -1052,7 -1046,7 +1052,7 @@@
  	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
  		idev->cnf.use_tempaddr = -1;	/*XXX*/
  		spin_unlock_bh(&ifp->lock);
 -		write_unlock(&idev->lock);
 +		write_unlock_bh(&idev->lock);
  		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
  			__func__);
  		in6_dev_put(idev);
@@@ -1077,8 -1071,8 +1077,8 @@@
  
  	regen_advance = idev->cnf.regen_max_retry *
  	                idev->cnf.dad_transmits *
 -	                idev->nd_parms->retrans_time / HZ;
 -	write_unlock(&idev->lock);
 +	                NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
 +	write_unlock_bh(&idev->lock);
  
  	/* A temporary address is created only if this calculated Preferred
  	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
@@@ -1105,7 -1099,7 +1105,7 @@@
  		in6_dev_put(idev);
  		pr_info("%s: retry temporary address regeneration\n", __func__);
  		tmpaddr = &addr;
 -		write_lock(&idev->lock);
 +		write_lock_bh(&idev->lock);
  		goto retry;
  	}
  
@@@ -1413,7 -1407,7 +1413,7 @@@ try_nextdev
  EXPORT_SYMBOL(ipv6_dev_get_saddr);
  
  int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
 -		      unsigned char banned_flags)
 +		      u32 banned_flags)
  {
  	struct inet6_ifaddr *ifp;
  	int err = -EADDRNOTAVAIL;
@@@ -1430,7 -1424,7 +1430,7 @@@
  }
  
  int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
 -		    unsigned char banned_flags)
 +		    u32 banned_flags)
  {
  	struct inet6_dev *idev;
  	int err = -EADDRNOTAVAIL;
@@@ -1677,7 -1671,7 +1677,7 @@@ void addrconf_leave_solict(struct inet6
  static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
  {
  	struct in6_addr addr;
- 	if (ifp->prefix_len == 127) /* RFC 6164 */
+ 	if (ifp->prefix_len >= 127) /* RFC 6164 */
  		return;
  	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
  	if (ipv6_addr_any(&addr))
@@@ -1688,7 -1682,7 +1688,7 @@@
  static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
  {
  	struct in6_addr addr;
- 	if (ifp->prefix_len == 127) /* RFC 6164 */
+ 	if (ifp->prefix_len >= 127) /* RFC 6164 */
  		return;
  	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
  	if (ipv6_addr_any(&addr))
@@@ -1894,8 -1888,7 +1894,8 @@@ static void ipv6_regen_rndid(unsigned l
  
  	expires = jiffies +
  		idev->cnf.temp_prefered_lft * HZ -
 -		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
 +		idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
 +		NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
  		idev->cnf.max_desync_factor * HZ;
  	if (time_before(expires, jiffies)) {
  		pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
@@@ -2023,73 -2016,6 +2023,73 @@@ static struct inet6_dev *addrconf_add_d
  	return idev;
  }
  
 +static void manage_tempaddrs(struct inet6_dev *idev,
 +			     struct inet6_ifaddr *ifp,
 +			     __u32 valid_lft, __u32 prefered_lft,
 +			     bool create, unsigned long now)
 +{
 +	u32 flags;
 +	struct inet6_ifaddr *ift;
 +
 +	read_lock_bh(&idev->lock);
 +	/* update all temporary addresses in the list */
 +	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
 +		int age, max_valid, max_prefered;
 +
 +		if (ifp != ift->ifpub)
 +			continue;
 +
 +		/* RFC 4941 section 3.3:
 +		 * If a received option will extend the lifetime of a public
 +		 * address, the lifetimes of temporary addresses should
 +		 * be extended, subject to the overall constraint that no
 +		 * temporary addresses should ever remain "valid" or "preferred"
 +		 * for a time longer than (TEMP_VALID_LIFETIME) or
 +		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
 +		 */
 +		age = (now - ift->cstamp) / HZ;
 +		max_valid = idev->cnf.temp_valid_lft - age;
 +		if (max_valid < 0)
 +			max_valid = 0;
 +
 +		max_prefered = idev->cnf.temp_prefered_lft -
 +			       idev->cnf.max_desync_factor - age;
 +		if (max_prefered < 0)
 +			max_prefered = 0;
 +
 +		if (valid_lft > max_valid)
 +			valid_lft = max_valid;
 +
 +		if (prefered_lft > max_prefered)
 +			prefered_lft = max_prefered;
 +
 +		spin_lock(&ift->lock);
 +		flags = ift->flags;
 +		ift->valid_lft = valid_lft;
 +		ift->prefered_lft = prefered_lft;
 +		ift->tstamp = now;
 +		if (prefered_lft > 0)
 +			ift->flags &= ~IFA_F_DEPRECATED;
 +
 +		spin_unlock(&ift->lock);
 +		if (!(flags&IFA_F_TENTATIVE))
 +			ipv6_ifa_notify(0, ift);
 +	}
 +
 +	if ((create || list_empty(&idev->tempaddr_list)) &&
 +	    idev->cnf.use_tempaddr > 0) {
 +		/* When a new public address is created as described
 +		 * in [ADDRCONF], also create a new temporary address.
 +		 * Also create a temporary address if it's enabled but
 +		 * no temporary address currently exists.
 +		 */
 +		read_unlock_bh(&idev->lock);
 +		ipv6_create_tempaddr(ifp, NULL);
 +	} else {
 +		read_unlock_bh(&idev->lock);
 +	}
 +}
 +
  void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
  {
  	struct prefix_info *pinfo;
@@@ -2244,7 -2170,6 +2244,7 @@@ ok
  				return;
  			}
  
 +			ifp->flags |= IFA_F_MANAGETEMPADDR;
  			update_lft = 0;
  			create = 1;
  			ifp->cstamp = jiffies;
@@@ -2253,8 -2178,9 +2253,8 @@@
  		}
  
  		if (ifp) {
 -			int flags;
 +			u32 flags;
  			unsigned long now;
 -			struct inet6_ifaddr *ift;
  			u32 stored_lft;
  
  			/* update lifetime (RFC2462 5.5.3 e) */
@@@ -2295,8 -2221,70 +2295,8 @@@
  			} else
  				spin_unlock(&ifp->lock);
  
 -			read_lock_bh(&in6_dev->lock);
 -			/* update all temporary addresses in the list */
 -			list_for_each_entry(ift, &in6_dev->tempaddr_list,
 -					    tmp_list) {
 -				int age, max_valid, max_prefered;
 -
 -				if (ifp != ift->ifpub)
 -					continue;
 -
 -				/*
 -				 * RFC 4941 section 3.3:
 -				 * If a received option will extend the lifetime
 -				 * of a public address, the lifetimes of
 -				 * temporary addresses should be extended,
 -				 * subject to the overall constraint that no
 -				 * temporary addresses should ever remain
 -				 * "valid" or "preferred" for a time longer than
 -				 * (TEMP_VALID_LIFETIME) or
 -				 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
 -				 * respectively.
 -				 */
 -				age = (now - ift->cstamp) / HZ;
 -				max_valid = in6_dev->cnf.temp_valid_lft - age;
 -				if (max_valid < 0)
 -					max_valid = 0;
 -
 -				max_prefered = in6_dev->cnf.temp_prefered_lft -
 -					       in6_dev->cnf.max_desync_factor -
 -					       age;
 -				if (max_prefered < 0)
 -					max_prefered = 0;
 -
 -				if (valid_lft > max_valid)
 -					valid_lft = max_valid;
 -
 -				if (prefered_lft > max_prefered)
 -					prefered_lft = max_prefered;
 -
 -				spin_lock(&ift->lock);
 -				flags = ift->flags;
 -				ift->valid_lft = valid_lft;
 -				ift->prefered_lft = prefered_lft;
 -				ift->tstamp = now;
 -				if (prefered_lft > 0)
 -					ift->flags &= ~IFA_F_DEPRECATED;
 -
 -				spin_unlock(&ift->lock);
 -				if (!(flags&IFA_F_TENTATIVE))
 -					ipv6_ifa_notify(0, ift);
 -			}
 -
 -			if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
 -				/*
 -				 * When a new public address is created as
 -				 * described in [ADDRCONF], also create a new
 -				 * temporary address. Also create a temporary
 -				 * address if it's enabled but no temporary
 -				 * address currently exists.
 -				 */
 -				read_unlock_bh(&in6_dev->lock);
 -				ipv6_create_tempaddr(ifp, NULL);
 -			} else {
 -				read_unlock_bh(&in6_dev->lock);
 -			}
 +			manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
 +					 create, now);
  
  			in6_ifa_put(ifp);
  			addrconf_verify(0);
@@@ -2375,11 -2363,10 +2375,11 @@@ err_exit
  /*
   *	Manual configuration of address on an interface
   */
 -static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
 +static int inet6_addr_add(struct net *net, int ifindex,
 +			  const struct in6_addr *pfx,
  			  const struct in6_addr *peer_pfx,
 -			  unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
 -			  __u32 valid_lft)
 +			  unsigned int plen, __u32 ifa_flags,
 +			  __u32 prefered_lft, __u32 valid_lft)
  {
  	struct inet6_ifaddr *ifp;
  	struct inet6_dev *idev;
@@@ -2398,9 -2385,6 +2398,9 @@@
  	if (!valid_lft || prefered_lft > valid_lft)
  		return -EINVAL;
  
 +	if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
 +		return -EINVAL;
 +
  	dev = __dev_get_by_index(net, ifindex);
  	if (!dev)
  		return -ENODEV;
@@@ -2441,9 -2425,6 +2441,9 @@@
  		 * manually configured addresses
  		 */
  		addrconf_dad_start(ifp);
 +		if (ifa_flags & IFA_F_MANAGETEMPADDR)
 +			manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
 +					 true, jiffies);
  		in6_ifa_put(ifp);
  		addrconf_verify(0);
  		return 0;
@@@ -3195,8 -3176,7 +3195,8 @@@ static void addrconf_dad_timer(unsigne
  	}
  
  	ifp->dad_probes--;
 -	addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time);
 +	addrconf_mod_dad_timer(ifp,
 +			       NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
  	spin_unlock(&ifp->lock);
  	write_unlock(&idev->lock);
  
@@@ -3376,7 -3356,7 +3376,7 @@@ static int if6_seq_show(struct seq_fil
  		   ifp->idev->dev->ifindex,
  		   ifp->prefix_len,
  		   ifp->scope,
 -		   ifp->flags,
 +		   (u8) ifp->flags,
  		   ifp->idev->dev->name);
  	return 0;
  }
@@@ -3476,7 -3456,12 +3476,12 @@@ restart
  					 &inet6_addr_lst[i], addr_lst) {
  			unsigned long age;
  
- 			if (ifp->flags & IFA_F_PERMANENT)
+ 			/* When preferred_lft is set to a value that is neither
+ 			 * zero nor infinity while valid_lft is infinity, an
+ 			 * IFA_F_PERMANENT address still has a finite preferred
+ 			 * lifetime and must be aged here.
+ 			 */

+ 			if ((ifp->flags & IFA_F_PERMANENT) &&
+ 			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
  				continue;
  
  			spin_lock(&ifp->lock);
@@@ -3501,7 -3486,8 +3506,8 @@@
  					ifp->flags |= IFA_F_DEPRECATED;
  				}
  
- 				if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+ 				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+ 				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
  					next = ifp->tstamp + ifp->valid_lft * HZ;
  
  				spin_unlock(&ifp->lock);
@@@ -3517,7 -3503,7 +3523,7 @@@
  				   !(ifp->flags&IFA_F_TENTATIVE)) {
  				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
  					ifp->idev->cnf.dad_transmits *
 -					ifp->idev->nd_parms->retrans_time / HZ;
 +					NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
  
  				if (age >= ifp->prefered_lft - regen_advance) {
  					struct inet6_ifaddr *ifpub = ifp->ifpub;
@@@ -3592,7 -3578,6 +3598,7 @@@ static const struct nla_policy ifa_ipv6
  	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
  	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
  	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
 +	[IFA_FLAGS]		= { .len = sizeof(u32) },
  };
  
  static int
@@@ -3616,21 -3601,16 +3622,21 @@@ inet6_rtm_deladdr(struct sk_buff *skb, 
  	return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
  }
  
 -static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 +static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
  			     u32 prefered_lft, u32 valid_lft)
  {
  	u32 flags;
  	clock_t expires;
  	unsigned long timeout;
 +	bool was_managetempaddr;
  
  	if (!valid_lft || (prefered_lft > valid_lft))
  		return -EINVAL;
  
 +	if (ifa_flags & IFA_F_MANAGETEMPADDR &&
 +	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
 +		return -EINVAL;
 +
  	timeout = addrconf_timeout_fixup(valid_lft, HZ);
  	if (addrconf_finite_timeout(timeout)) {
  		expires = jiffies_to_clock_t(timeout * HZ);
@@@ -3650,10 -3630,7 +3656,10 @@@
  	}
  
  	spin_lock_bh(&ifp->lock);
 -	ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
 +	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
 +	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
 +			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR);
 +	ifp->flags |= ifa_flags;
  	ifp->tstamp = jiffies;
  	ifp->valid_lft = valid_lft;
  	ifp->prefered_lft = prefered_lft;
@@@ -3664,14 -3641,6 +3670,14 @@@
  
  	addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
  			      expires, flags);
 +
 +	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
 +		if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
 +			valid_lft = prefered_lft = 0;
 +		manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
 +				 !was_managetempaddr, jiffies);
 +	}
 +
  	addrconf_verify(0);
  
  	return 0;
@@@ -3687,7 -3656,7 +3693,7 @@@ inet6_rtm_newaddr(struct sk_buff *skb, 
  	struct inet6_ifaddr *ifa;
  	struct net_device *dev;
  	u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
 -	u8 ifa_flags;
 +	u32 ifa_flags;
  	int err;
  
  	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@@ -3714,10 -3683,8 +3720,10 @@@
  	if (dev == NULL)
  		return -ENODEV;
  
 +	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
 +
  	/* We ignore other flags so far. */
 -	ifa_flags = ifm->ifa_flags & (IFA_F_NODAD | IFA_F_HOMEADDRESS);
 +	ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR;
  
  	ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
  	if (ifa == NULL) {
@@@ -3741,7 -3708,7 +3747,7 @@@
  	return err;
  }
  
 -static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
 +static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
  			  u8 scope, int ifindex)
  {
  	struct ifaddrmsg *ifm;
@@@ -3784,8 -3751,7 +3790,8 @@@ static inline int inet6_ifaddr_msgsize(
  	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
  	       + nla_total_size(16) /* IFA_LOCAL */
  	       + nla_total_size(16) /* IFA_ADDRESS */
 -	       + nla_total_size(sizeof(struct ifa_cacheinfo));
 +	       + nla_total_size(sizeof(struct ifa_cacheinfo))
 +	       + nla_total_size(4)  /* IFA_FLAGS */;
  }
  
  static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
@@@ -3801,7 -3767,8 +3807,8 @@@
  	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
  		      ifa->idev->dev->ifindex);
  
- 	if (!(ifa->flags&IFA_F_PERMANENT)) {
+ 	if (!((ifa->flags&IFA_F_PERMANENT) &&
+ 	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
  		preferred = ifa->prefered_lft;
  		valid = ifa->valid_lft;
  		if (preferred != INFINITY_LIFE_TIME) {
@@@ -3833,9 -3800,6 +3840,9 @@@
  	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
  		goto error;
  
 +	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
 +		goto error;
 +
  	return nlmsg_end(skb, nlh);
  
  error:
@@@ -4239,7 -4203,7 +4246,7 @@@ static int inet6_fill_ifla6_attrs(struc
  	ci.max_reasm_len = IPV6_MAXPLEN;
  	ci.tstamp = cstamp_delta(idev->tstamp);
  	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
 -	ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
 +	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
  	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
  		goto nla_put_failure;
  	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
@@@ -4732,46 -4696,6 +4739,46 @@@ int addrconf_sysctl_disable(struct ctl_
  	return ret;
  }
  
 +static
 +int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
 +			      void __user *buffer, size_t *lenp, loff_t *ppos)
 +{
 +	int *valp = ctl->data;
 +	int ret;
 +	int old, new;
 +
 +	old = *valp;
 +	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 +	new = *valp;
 +
 +	if (write && old != new) {
 +		struct net *net = ctl->extra2;
 +
 +		if (!rtnl_trylock())
 +			return restart_syscall();
 +
 +		if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
 +			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +						     NETCONFA_IFINDEX_DEFAULT,
 +						     net->ipv6.devconf_dflt);
 +		else if (valp == &net->ipv6.devconf_all->proxy_ndp)
 +			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +						     NETCONFA_IFINDEX_ALL,
 +						     net->ipv6.devconf_all);
 +		else {
 +			struct inet6_dev *idev = ctl->extra1;
 +
 +			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 +						     idev->dev->ifindex,
 +						     &idev->cnf);
 +		}
 +		rtnl_unlock();
 +	}
 +
 +	return ret;
 +}
 +
 +
  static struct addrconf_sysctl_table
  {
  	struct ctl_table_header *sysctl_header;
@@@ -4958,7 -4882,7 +4965,7 @@@
  			.data		= &ipv6_devconf.proxy_ndp,
  			.maxlen		= sizeof(int),
  			.mode		= 0644,
 -			.proc_handler	= proc_dointvec,
 +			.proc_handler	= addrconf_sysctl_proxy_ndp,
  		},
  		{
  			.procname	= "accept_source_route",
@@@ -5074,7 -4998,7 +5081,7 @@@ static void __addrconf_sysctl_unregiste
  
  static void addrconf_sysctl_register(struct inet6_dev *idev)
  {
 -	neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
 +	neigh_sysctl_register(idev->dev, idev->nd_parms,
  			      &ndisc_ifinfo_sysctl_change);
  	__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
  					idev, &idev->cnf);
@@@ -5207,7 -5131,9 +5214,7 @@@ int __init addrconf_init(void
  
  	addrconf_verify(0);
  
 -	err = rtnl_af_register(&inet6_ops);
 -	if (err < 0)
 -		goto errout_af;
 +	rtnl_af_register(&inet6_ops);
  
  	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
  			      NULL);
@@@ -5231,6 -5157,7 +5238,6 @@@
  	return 0;
  errout:
  	rtnl_af_unregister(&inet6_ops);
 -errout_af:
  	unregister_netdevice_notifier(&ipv6_dev_notf);
  errlo:
  	unregister_pernet_subsys(&addrconf_ops);
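
Most of the addrconf.c churn moves the RFC 4941 temporary-address bookkeeping into the new manage_tempaddrs() helper so that inet6_addr_add() and inet6_addr_modify() can reuse it for IFA_F_MANAGETEMPADDR addresses. The essential arithmetic is the lifetime clamp; a standalone sketch with illustrative inputs (the constants are RFC 4941's defaults in seconds, the wrapper function is hypothetical):

/* Clamp received lifetimes so a temporary address never stays valid or
 * preferred longer than TEMP_VALID_LIFETIME resp.
 * TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR from its creation time. */
#include <stdio.h>

struct cnf { unsigned temp_valid_lft, temp_prefered_lft, max_desync_factor; };

static void clamp_lifetimes(const struct cnf *c, unsigned age,
			    unsigned *valid_lft, unsigned *prefered_lft)
{
	int max_valid = (int)c->temp_valid_lft - (int)age;
	int max_prefered = (int)c->temp_prefered_lft -
			   (int)c->max_desync_factor - (int)age;

	if (max_valid < 0)
		max_valid = 0;
	if (max_prefered < 0)
		max_prefered = 0;
	if (*valid_lft > (unsigned)max_valid)
		*valid_lft = max_valid;
	if (*prefered_lft > (unsigned)max_prefered)
		*prefered_lft = max_prefered;
}

int main(void)
{
	struct cnf c = { 604800, 86400, 600 };	/* one week, one day, 10 min */
	unsigned valid = 2592000, prefered = 604800, age = 3600;

	clamp_lifetimes(&c, age, &valid, &prefered);
	printf("valid=%u prefered=%u\n", valid, prefered);
	return 0;
}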
diff --combined net/ipv6/ip6_output.c
index 788c01a,e6f9319..d1de956
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@@ -336,8 -336,7 +336,8 @@@ int ip6_forward(struct sk_buff *skb
  		goto drop;
  
  	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 -		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_INDISCARDS);
  		goto drop;
  	}
  
@@@ -371,8 -370,8 +371,8 @@@
  		/* Force OUTPUT device used as source address */
  		skb->dev = dst->dev;
  		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 -		IP6_INC_STATS_BH(net,
 -				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_INHDRERRORS);
  
  		kfree_skb(skb);
  		return -ETIMEDOUT;
@@@ -385,15 -384,14 +385,15 @@@
  		if (proxied > 0)
  			return ip6_input(skb);
  		else if (proxied < 0) {
 -			IP6_INC_STATS(net, ip6_dst_idev(dst),
 -				      IPSTATS_MIB_INDISCARDS);
 +			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +					 IPSTATS_MIB_INDISCARDS);
  			goto drop;
  		}
  	}
  
  	if (!xfrm6_route_forward(skb)) {
 -		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_INDISCARDS);
  		goto drop;
  	}
  	dst = skb_dst(skb);
@@@ -450,17 -448,16 +450,17 @@@
  		/* Again, force OUTPUT device used as source address */
  		skb->dev = dst->dev;
  		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 -		IP6_INC_STATS_BH(net,
 -				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
 -		IP6_INC_STATS_BH(net,
 -				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_INTOOBIGERRORS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_FRAGFAILS);
  		kfree_skb(skb);
  		return -EMSGSIZE;
  	}
  
  	if (skb_cow(skb, dst->dev->hard_header_len)) {
 -		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
 +		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 +				 IPSTATS_MIB_OUTDISCARDS);
  		goto drop;
  	}
  
@@@ -941,6 -938,7 +941,6 @@@ EXPORT_SYMBOL_GPL(ip6_dst_lookup)
   *	@sk: socket which provides route info
   *	@fl6: flow to lookup
   *	@final_dst: final destination address for ipsec lookup
 - *	@can_sleep: we are in a sleepable context
   *
   *	This function performs a route lookup on the given flow.
   *
@@@ -948,7 -946,8 +948,7 @@@
   *	error code.
   */
  struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 -				      const struct in6_addr *final_dst,
 -				      bool can_sleep)
 +				      const struct in6_addr *final_dst)
  {
  	struct dst_entry *dst = NULL;
  	int err;
@@@ -958,6 -957,8 +958,6 @@@
  		return ERR_PTR(err);
  	if (final_dst)
  		fl6->daddr = *final_dst;
 -	if (can_sleep)
 -		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
  
  	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
  }
@@@ -968,6 -969,7 +968,6 @@@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow)
   *	@sk: socket which provides the dst cache and route info
   *	@fl6: flow to lookup
   *	@final_dst: final destination address for ipsec lookup
 - *	@can_sleep: we are in a sleepable context
   *
   *	This function performs a route lookup on the given flow with the
   *	possibility of using the cached route in the socket if it is valid.
@@@ -978,7 -980,8 +978,7 @@@
   *	error code.
   */
  struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 -					 const struct in6_addr *final_dst,
 -					 bool can_sleep)
 +					 const struct in6_addr *final_dst)
  {
  	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
  	int err;
@@@ -990,6 -993,8 +990,6 @@@
  		return ERR_PTR(err);
  	if (final_dst)
  		fl6->daddr = *final_dst;
 -	if (can_sleep)
 -		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
  
  	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
  }
@@@ -1157,10 -1162,10 +1157,10 @@@ int ip6_append_data(struct sock *sk, in
  		np->cork.hop_limit = hlimit;
  		np->cork.tclass = tclass;
  		if (rt->dst.flags & DST_XFRM_TUNNEL)
 -			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
 +			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
  			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
  		else
 -			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
 +			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
  			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
  		if (np->frag_size < mtu) {
  			if (np->frag_size)
@@@ -1188,11 -1193,35 +1188,35 @@@
  
  	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
  			(opt ? opt->opt_nflen : 0);
- 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+ 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+ 		     sizeof(struct frag_hdr);
  
  	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
- 		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
- 			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+ 		unsigned int maxnonfragsize, headersize;
+ 
+ 		headersize = sizeof(struct ipv6hdr) +
+ 			     (opt ? opt->tot_len : 0) +
+ 			     (dst_allfrag(&rt->dst) ?
+ 			      sizeof(struct frag_hdr) : 0) +
+ 			     rt->rt6i_nfheader_len;
+ 
+ 		maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+ 				 mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+ 
+ 		/* dontfrag active */
+ 		if ((cork->length + length > mtu - headersize) && dontfrag &&
+ 		    (sk->sk_protocol == IPPROTO_UDP ||
+ 		     sk->sk_protocol == IPPROTO_RAW)) {
+ 			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+ 						   sizeof(struct ipv6hdr));
+ 			goto emsgsize;
+ 		}
+ 
+ 		if (cork->length + length > maxnonfragsize - headersize) {
+ emsgsize:
+ 			ipv6_local_error(sk, EMSGSIZE, fl6,
+ 					 mtu - headersize +
+ 					 sizeof(struct ipv6hdr));
  			return -EMSGSIZE;
  		}
  	}
@@@ -1217,12 -1246,6 +1241,6 @@@
  	 * --yoshfuji
  	 */
  
- 	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
- 					   sk->sk_protocol == IPPROTO_RAW)) {
- 		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
- 		return -EMSGSIZE;
- 	}
- 
  	skb = skb_peek_tail(&sk->sk_write_queue);
  	cork->length += length;
  	if (((length > mtu) ||
@@@ -1262,7 -1285,7 +1280,7 @@@ alloc_new_skb
  			if (skb == NULL || skb_prev == NULL)
  				ip6_append_data_mtu(&mtu, &maxfraglen,
  						    fragheaderlen, skb, rt,
 -						    np->pmtudisc ==
 +						    np->pmtudisc >=
  						    IPV6_PMTUDISC_PROBE);
  
  			skb_prev = skb;
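
The ip6_append_data() rework computes the total header overhead once and derives both limits from it, so the RXPMTU and EMSGSIZE decisions use consistent numbers instead of the old mtu-exthdrlen approximations. A simplified stand-in for the check (rt6i_nfheader_len and the corking details are omitted; the return value mimics -EMSGSIZE):

#include <stdio.h>

#define IPV6_HDRLEN	40
#define FRAG_HDRLEN	8
#define IPV6_MAXPLEN	65535

/* Reject only what can never be sent: against the PMTU when PMTU
 * discovery forbids fragmentation, else against the 64K IPv6 maximum. */
static int check_size(unsigned cork_len, unsigned len, unsigned mtu,
		      unsigned opt_len, int allfrag, int pmtudisc_do)
{
	unsigned headersize = IPV6_HDRLEN + opt_len +
			      (allfrag ? FRAG_HDRLEN : 0);
	unsigned maxnonfragsize = pmtudisc_do ? mtu
					      : IPV6_HDRLEN + IPV6_MAXPLEN;

	if (cork_len + len > maxnonfragsize - headersize)
		return -90;	/* -EMSGSIZE */
	return 0;
}

int main(void)
{
	printf("%d\n", check_size(0, 2000, 1500, 0, 0, 1)); /* -90 */
	printf("%d\n", check_size(0, 2000, 1500, 0, 0, 0)); /* 0: fragment */
	return 0;
}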
diff --combined net/ipv6/ip6_tunnel.c
index 0289421,7881965..1e5e240
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@@ -29,6 -29,7 +29,6 @@@
  #include <linux/if.h>
  #include <linux/in.h>
  #include <linux/ip.h>
 -#include <linux/if_tunnel.h>
  #include <linux/net.h>
  #include <linux/in6.h>
  #include <linux/netdevice.h>
@@@ -69,6 -70,7 +69,6 @@@ MODULE_ALIAS_NETDEV("ip6tnl0")
  #define IP6_TNL_TRACE(x...) do {;} while(0)
  #endif
  
 -#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
  #define IPV6_TCLASS_SHIFT 20
  
  #define HASH_SIZE_SHIFT  5
@@@ -101,17 -103,25 +101,26 @@@ struct ip6_tnl_net 
  
  static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  {
- 	struct pcpu_sw_netstats sum = { 0 };
 -	struct pcpu_tstats tmp, sum = { 0 };
++	struct pcpu_sw_netstats tmp, sum = { 0 };
  	int i;
  
  	for_each_possible_cpu(i) {
+ 		unsigned int start;
 -		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 +		const struct pcpu_sw_netstats *tstats =
 +						   per_cpu_ptr(dev->tstats, i);
  
- 		sum.rx_packets += tstats->rx_packets;
- 		sum.rx_bytes   += tstats->rx_bytes;
- 		sum.tx_packets += tstats->tx_packets;
- 		sum.tx_bytes   += tstats->tx_bytes;
+ 		do {
+ 			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ 			tmp.rx_packets = tstats->rx_packets;
+ 			tmp.rx_bytes = tstats->rx_bytes;
+ 			tmp.tx_packets = tstats->tx_packets;
+ 			tmp.tx_bytes =  tstats->tx_bytes;
+ 		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+ 
+ 		sum.rx_packets += tmp.rx_packets;
+ 		sum.rx_bytes   += tmp.rx_bytes;
+ 		sum.tx_packets += tmp.tx_packets;
+ 		sum.tx_bytes   += tmp.tx_bytes;
  	}
  	dev->stats.rx_packets = sum.rx_packets;
  	dev->stats.rx_bytes   = sum.rx_bytes;
@@@ -784,7 -794,7 +793,7 @@@ static int ip6_tnl_rcv(struct sk_buff *
  
  	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
  					&ipv6h->daddr)) != NULL) {
 -		struct pcpu_tstats *tstats;
 +		struct pcpu_sw_netstats *tstats;
  
  		if (t->parms.proto != ipproto && t->parms.proto != 0) {
  			rcu_read_unlock();
@@@ -823,8 -833,10 +832,10 @@@
  		}
  
  		tstats = this_cpu_ptr(t->dev->tstats);
+ 		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
+ 		u64_stats_update_end(&tstats->syncp);
  
  		netif_rx(skb);
  
@@@ -1130,7 -1142,7 +1141,7 @@@ ip6ip6_tnl_xmit(struct sk_buff *skb, st
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
  		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
 -		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 +		fl6.flowlabel |= ip6_flowlabel(ipv6h);
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
  		fl6.flowi6_mark = skb->mark;
  
@@@ -1497,12 -1509,12 +1508,12 @@@ ip6_tnl_dev_init_gen(struct net_device 
  
  	t->dev = dev;
  	t->net = dev_net(dev);
 -	dev->tstats = alloc_percpu(struct pcpu_tstats);
 +	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
 -		struct pcpu_tstats *ip6_tnl_stats;
 +		struct pcpu_sw_netstats *ip6_tnl_stats;
  		ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ip6_tnl_stats->syncp);
  	}
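
The tunnel stats hunks wrap both the per-cpu writers (u64_stats_update_begin/end) and the reader in ip6_get_stats() with the u64_stats seqcount, so a 32-bit reader cannot observe a torn 64-bit counter. The retry protocol, reduced to a single-threaded user-space sketch (real code needs the memory barriers the kernel primitives provide):

#include <stdio.h>

struct stats {
	unsigned seq;			/* odd while a write is in flight */
	unsigned long long rx_bytes;
};

/* Wait for an even sequence value: no writer is mid-update. */
static unsigned fetch_begin(const struct stats *s)
{
	unsigned seq;

	do {
		seq = s->seq;		/* would be an acquire load */
	} while (seq & 1);
	return seq;
}

/* Nonzero if a writer ran while we copied: redo the copy. */
static int fetch_retry(const struct stats *s, unsigned start)
{
	return s->seq != start;
}

int main(void)
{
	struct stats s = { 0, 12345 };
	unsigned long long snap;
	unsigned start;

	do {
		start = fetch_begin(&s);
		snap = s.rx_bytes;
	} while (fetch_retry(&s, start));
	printf("%llu\n", snap);
	return 0;
}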
diff --combined net/ipv6/ip6_vti.c
index da1d9e4,a4564b0..b50acd5
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@@ -24,6 -24,7 +24,6 @@@
  #include <linux/if.h>
  #include <linux/in.h>
  #include <linux/ip.h>
 -#include <linux/if_tunnel.h>
  #include <linux/net.h>
  #include <linux/in6.h>
  #include <linux/netdevice.h>
@@@ -74,27 -75,6 +74,6 @@@ struct vti6_net 
  	struct ip6_tnl __rcu **tnls[2];
  };
  
- static struct net_device_stats *vti6_get_stats(struct net_device *dev)
- {
- 	struct pcpu_sw_netstats sum = { 0 };
- 	int i;
- 
- 	for_each_possible_cpu(i) {
- 		const struct pcpu_sw_netstats *tstats =
- 						   per_cpu_ptr(dev->tstats, i);
- 
- 		sum.rx_packets += tstats->rx_packets;
- 		sum.rx_bytes   += tstats->rx_bytes;
- 		sum.tx_packets += tstats->tx_packets;
- 		sum.tx_bytes   += tstats->tx_bytes;
- 	}
- 	dev->stats.rx_packets = sum.rx_packets;
- 	dev->stats.rx_bytes   = sum.rx_bytes;
- 	dev->stats.tx_packets = sum.tx_packets;
- 	dev->stats.tx_bytes   = sum.tx_bytes;
- 	return &dev->stats;
- }
- 
  #define for_each_vti6_tunnel_rcu(start) \
  	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
  
@@@ -312,7 -292,7 +291,7 @@@ static int vti6_rcv(struct sk_buff *skb
  
  	if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
  				 &ipv6h->daddr)) != NULL) {
 -		struct pcpu_tstats *tstats;
 +		struct pcpu_sw_netstats *tstats;
  
  		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
  			rcu_read_unlock();
@@@ -331,8 -311,10 +310,10 @@@
  		}
  
  		tstats = this_cpu_ptr(t->dev->tstats);
+ 		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
+ 		u64_stats_update_end(&tstats->syncp);
  
  		skb->mark = 0;
  		secpath_reset(skb);
@@@ -716,7 -698,7 +697,7 @@@ static const struct net_device_ops vti6
  	.ndo_start_xmit = vti6_tnl_xmit,
  	.ndo_do_ioctl	= vti6_ioctl,
  	.ndo_change_mtu = vti6_change_mtu,
- 	.ndo_get_stats	= vti6_get_stats,
+ 	.ndo_get_stats64 = ip_tunnel_get_stats64,
  };
  
  /**
@@@ -753,7 -735,7 +734,7 @@@ static inline int vti6_dev_init_gen(str
  
  	t->dev = dev;
  	t->net = dev_net(dev);
 -	dev->tstats = alloc_percpu(struct pcpu_tstats);
 +	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  	return 0;
diff --combined net/ipv6/route.c
index 266f110,4b4944c..11dac21
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -66,9 -66,8 +66,9 @@@
  #endif
  
  enum rt6_nud_state {
 -	RT6_NUD_FAIL_HARD = -2,
 -	RT6_NUD_FAIL_SOFT = -1,
 +	RT6_NUD_FAIL_HARD = -3,
 +	RT6_NUD_FAIL_PROBE = -2,
 +	RT6_NUD_FAIL_DO_RR = -1,
  	RT6_NUD_SUCCEED = 1
  };
  
@@@ -104,36 -103,6 +104,36 @@@ static struct rt6_info *rt6_get_route_i
  					   const struct in6_addr *gwaddr, int ifindex);
  #endif
  
 +static void rt6_bind_peer(struct rt6_info *rt, int create)
 +{
 +	struct inet_peer_base *base;
 +	struct inet_peer *peer;
 +
 +	base = inetpeer_base_ptr(rt->_rt6i_peer);
 +	if (!base)
 +		return;
 +
 +	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
 +	if (peer) {
 +		if (!rt6_set_peer(rt, peer))
 +			inet_putpeer(peer);
 +	}
 +}
 +
 +static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
 +{
 +	if (rt6_has_peer(rt))
 +		return rt6_peer_ptr(rt);
 +
 +	rt6_bind_peer(rt, create);
 +	return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
 +}
 +
 +static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
 +{
 +	return __rt6_get_peer(rt, 1);
 +}
 +
  static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
  {
  	struct rt6_info *rt = (struct rt6_info *) dst;
@@@ -342,6 -311,22 +342,6 @@@ static void ip6_dst_destroy(struct dst_
  	}
  }
  
 -void rt6_bind_peer(struct rt6_info *rt, int create)
 -{
 -	struct inet_peer_base *base;
 -	struct inet_peer *peer;
 -
 -	base = inetpeer_base_ptr(rt->_rt6i_peer);
 -	if (!base)
 -		return;
 -
 -	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
 -	if (peer) {
 -		if (!rt6_set_peer(rt, peer))
 -			inet_putpeer(peer);
 -	}
 -}
 -
  static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
  			   int how)
  {
@@@ -536,7 -521,7 +536,7 @@@ static void rt6_probe(struct rt6_info *
  		work = kmalloc(sizeof(*work), GFP_ATOMIC);
  
  		if (neigh && work)
 -			neigh->updated = jiffies;
 +			__neigh_set_probe_once(neigh);
  
  		if (neigh)
  			write_unlock(&neigh->lock);
@@@ -592,13 -577,11 +592,13 @@@ static inline enum rt6_nud_state rt6_ch
  #ifdef CONFIG_IPV6_ROUTER_PREF
  		else if (!(neigh->nud_state & NUD_FAILED))
  			ret = RT6_NUD_SUCCEED;
 +		else
 +			ret = RT6_NUD_FAIL_PROBE;
  #endif
  		read_unlock(&neigh->lock);
  	} else {
  		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
 -		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
 +		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
  	}
  	rcu_read_unlock_bh();
  
@@@ -635,17 -618,16 +635,17 @@@ static struct rt6_info *find_match(stru
  		goto out;
  
  	m = rt6_score_route(rt, oif, strict);
 -	if (m == RT6_NUD_FAIL_SOFT) {
 +	if (m == RT6_NUD_FAIL_DO_RR) {
  		match_do_rr = true;
  		m = 0; /* lowest valid score */
 -	} else if (m < 0) {
 +	} else if (m == RT6_NUD_FAIL_HARD) {
  		goto out;
  	}
  
  	if (strict & RT6_LOOKUP_F_REACHABLE)
  		rt6_probe(rt);
  
 +	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
  	if (m > *mpri) {
  		*do_rr = match_do_rr;
  		*mpri = m;
@@@ -1923,9 -1905,7 +1923,7 @@@ static struct rt6_info *ip6_rt_copy(str
  		else
  			rt->rt6i_gateway = *dest;
  		rt->rt6i_flags = ort->rt6i_flags;
- 		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
- 		    (RTF_DEFAULT | RTF_ADDRCONF))
- 			rt6_set_from(rt, ort);
+ 		rt6_set_from(rt, ort);
  		rt->rt6i_metric = 0;
  
  #ifdef CONFIG_IPV6_SUBTREES
@@@ -2258,7 -2238,7 +2256,7 @@@ void rt6_remove_prefsrc(struct inet6_if
  		.net = net,
  		.addr = &ifp->addr,
  	};
 -	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
 +	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
  }
  
  struct arg_dev_net {
@@@ -2285,7 -2265,7 +2283,7 @@@ void rt6_ifdown(struct net *net, struc
  		.net = net,
  	};
  
 -	fib6_clean_all(net, fib6_ifdown, 0, &adn);
 +	fib6_clean_all(net, fib6_ifdown, &adn);
  	icmp6_clean_all(fib6_ifdown, &adn);
  }
  
@@@ -2340,7 -2320,7 +2338,7 @@@ void rt6_mtu_change(struct net_device *
  		.mtu = mtu,
  	};
  
 -	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
 +	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
  }
  
  static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
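
The route.c change splits the old RT6_NUD_FAIL_SOFT into FAIL_DO_RR (stay a round-robin candidate with the lowest valid score) and FAIL_PROBE (score negative, so it loses to any previous match), while FAIL_HARD alone disqualifies a route. A hypothetical stand-alone rendition of the find_match() dispatch:

#include <stdio.h>

enum nud { FAIL_HARD = -3, FAIL_PROBE = -2, FAIL_DO_RR = -1, SUCCEED = 1 };

/* Returns 1 if this route becomes the current best match. */
static int score_route(enum nud m, int *mpri, int *do_rr)
{
	if (m == FAIL_DO_RR) {
		*do_rr = 1;
		m = 0;			/* lowest valid score */
	} else if (m == FAIL_HARD) {
		return 0;		/* never a candidate */
	}
	/* m may still be FAIL_PROBE (-2) here and loses to any prior
	 * match with a higher score */
	if ((int)m > *mpri) {
		*mpri = m;
		return 1;
	}
	return 0;
}

int main(void)
{
	int mpri = FAIL_HARD, do_rr = 0;

	printf("%d\n", score_route(FAIL_PROBE, &mpri, &do_rr)); /* 1 */
	printf("%d\n", score_route(FAIL_HARD, &mpri, &do_rr));  /* 0 */
	return 0;
}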
diff --combined net/ipv6/sit.c
index 9937b26,d3005b3..3dfbcf1
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@@ -671,7 -671,7 +671,7 @@@ static int ipip6_rcv(struct sk_buff *sk
  	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
  				     iph->saddr, iph->daddr);
  	if (tunnel != NULL) {
 -		struct pcpu_tstats *tstats;
 +		struct pcpu_sw_netstats *tstats;
  
  		if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
  		    tunnel->parms.iph.protocol != 0)
@@@ -702,8 -702,10 +702,10 @@@
  		}
  
  		tstats = this_cpu_ptr(tunnel->dev->tstats);
+ 		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
+ 		u64_stats_update_end(&tstats->syncp);
  
  		netif_rx(skb);
  
@@@ -924,7 -926,7 +926,7 @@@ static netdev_tx_t ipip6_tunnel_xmit(st
  		if (tunnel->parms.iph.daddr && skb_dst(skb))
  			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
  
- 		if (skb->len > mtu) {
+ 		if (skb->len > mtu && !skb_is_gso(skb)) {
  			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
  			ip_rt_put(rt);
  			goto tx_error;
@@@ -966,8 -968,10 +968,10 @@@
  	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
  
  	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
- 	if (IS_ERR(skb))
+ 	if (IS_ERR(skb)) {
+ 		ip_rt_put(rt);
  		goto out;
+ 	}
  
  	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
  			    ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@@ -1361,12 -1365,12 +1365,12 @@@ static int ipip6_tunnel_init(struct net
  	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
  
  	ipip6_tunnel_bind_dev(dev);
 -	dev->tstats = alloc_percpu(struct pcpu_tstats);
 +	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
 -		struct pcpu_tstats *ipip6_tunnel_stats;
 +		struct pcpu_sw_netstats *ipip6_tunnel_stats;
  		ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ipip6_tunnel_stats->syncp);
  	}
@@@ -1391,12 -1395,12 +1395,12 @@@ static int __net_init ipip6_fb_tunnel_i
  	iph->ihl		= 5;
  	iph->ttl		= 64;
  
 -	dev->tstats = alloc_percpu(struct pcpu_tstats);
 +	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
 -		struct pcpu_tstats *ipip6_fb_stats;
 +		struct pcpu_sw_netstats *ipip6_fb_stats;
  		ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ipip6_fb_stats->syncp);
  	}
diff --combined net/netfilter/ipvs/ip_vs_nfct.c
index d5f4151,5a355a4..5882bbf
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@@ -19,7 -19,8 +19,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   *
   * Authors:
@@@ -62,6 -63,7 +62,7 @@@
  #include <net/ip_vs.h>
  #include <net/netfilter/nf_conntrack_core.h>
  #include <net/netfilter/nf_conntrack_expect.h>
+ #include <net/netfilter/nf_conntrack_seqadj.h>
  #include <net/netfilter/nf_conntrack_helper.h>
  #include <net/netfilter/nf_conntrack_zones.h>
  
@@@ -96,6 -98,11 +97,11 @@@ ip_vs_update_conntrack(struct sk_buff *
  	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
  		return;
  
+ 	/* Applications may adjust TCP seqs */
+ 	if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+ 	    !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+ 		return;
+ 
  	/*
  	 * The connection is not yet in the hashtable, so we update it.
  	 * CIP->VIP will remain the same, so leave the tuple in
diff --combined net/netfilter/nf_tables_api.c
index 629b6da,71a9f49..1fcef1e
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@@ -180,8 -180,7 +180,8 @@@ static int nf_tables_fill_table_info(st
  	nfmsg->res_id		= 0;
  
  	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
 -	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
 +	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
 +	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
  		goto nla_put_failure;
  
  	return nlmsg_end(skb, nlh);
@@@ -313,6 -312,9 +313,9 @@@ static int nf_tables_table_enable(struc
  	int err, i = 0;
  
  	list_for_each_entry(chain, &table->chains, list) {
+ 		if (!(chain->flags & NFT_BASE_CHAIN))
+ 			continue;
+ 
  		err = nf_register_hook(&nft_base_chain(chain)->ops);
  		if (err < 0)
  			goto err;
@@@ -322,6 -324,9 +325,9 @@@
  	return 0;
  err:
  	list_for_each_entry(chain, &table->chains, list) {
+ 		if (!(chain->flags & NFT_BASE_CHAIN))
+ 			continue;
+ 
  		if (i-- <= 0)
  			break;
  
@@@ -334,8 -339,10 +340,10 @@@ static int nf_tables_table_disable(stru
  {
  	struct nft_chain *chain;
  
- 	list_for_each_entry(chain, &table->chains, list)
- 		nf_unregister_hook(&nft_base_chain(chain)->ops);
+ 	list_for_each_entry(chain, &table->chains, list) {
+ 		if (chain->flags & NFT_BASE_CHAIN)
+ 			nf_unregister_hook(&nft_base_chain(chain)->ops);
+ 	}
  
  	return 0;
  }
@@@ -1924,14 -1931,12 +1932,14 @@@ static int nft_ctx_init_from_setattr(st
  {
  	struct net *net = sock_net(skb->sk);
  	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 -	const struct nft_af_info *afi;
 +	const struct nft_af_info *afi = NULL;
  	const struct nft_table *table = NULL;
  
 -	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
 -	if (IS_ERR(afi))
 -		return PTR_ERR(afi);
 +	if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
 +		afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
 +		if (IS_ERR(afi))
 +			return PTR_ERR(afi);
 +	}
  
  	if (nla[NFTA_SET_TABLE] != NULL) {
  		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
@@@ -1976,14 -1981,11 +1984,14 @@@ static int nf_tables_set_alloc_name(str
  			return -ENOMEM;
  
  		list_for_each_entry(i, &ctx->table->sets, list) {
 -			if (!sscanf(i->name, name, &n))
 +			int tmp;
 +
 +			if (!sscanf(i->name, name, &tmp))
  				continue;
 -			if (n < 0 || n > BITS_PER_LONG * PAGE_SIZE)
 +			if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE)
  				continue;
 -			set_bit(n, inuse);
 +
 +			set_bit(tmp, inuse);
  		}
  
  		n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
@@@ -2100,21 -2102,25 +2108,25 @@@ done
  	return skb->len;
  }
  
 -static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 -				   struct netlink_callback *cb)
 +static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
 +				      struct netlink_callback *cb)
  {
  	const struct nft_set *set;
- 	unsigned int idx = 0, s_idx = cb->args[0];
+ 	unsigned int idx, s_idx = cb->args[0];
  	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
  
  	if (cb->args[1])
  		return skb->len;
  
  	list_for_each_entry(table, &ctx->afi->tables, list) {
- 		if (cur_table && cur_table != table)
- 			continue;
+ 		if (cur_table) {
+ 			if (cur_table != table)
+ 				continue;
  
+ 			cur_table = NULL;
+ 		}
  		ctx->table = table;
+ 		idx = 0;
  		list_for_each_entry(set, &ctx->table->sets, list) {
  			if (idx < s_idx)
  				goto cont;
@@@ -2133,61 -2139,6 +2145,61 @@@ done
  	return skb->len;
  }
  
 +static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 +				   struct netlink_callback *cb)
 +{
 +	const struct nft_set *set;
 +	unsigned int idx, s_idx = cb->args[0];
 +	const struct nft_af_info *afi;
 +	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
 +	struct net *net = sock_net(skb->sk);
 +	int cur_family = cb->args[3];
 +
 +	if (cb->args[1])
 +		return skb->len;
 +
 +	list_for_each_entry(afi, &net->nft.af_info, list) {
 +		if (cur_family) {
 +			if (afi->family != cur_family)
 +				continue;
 +
 +			cur_family = 0;
 +		}
 +
 +		list_for_each_entry(table, &afi->tables, list) {
 +			if (cur_table) {
 +				if (cur_table != table)
 +					continue;
 +
 +				cur_table = NULL;
 +			}
 +
 +			ctx->table = table;
 +			ctx->afi = afi;
 +			idx = 0;
 +			list_for_each_entry(set, &ctx->table->sets, list) {
 +				if (idx < s_idx)
 +					goto cont;
 +				if (nf_tables_fill_set(skb, ctx, set,
 +						       NFT_MSG_NEWSET,
 +						       NLM_F_MULTI) < 0) {
 +					cb->args[0] = idx;
 +					cb->args[2] = (unsigned long) table;
 +					cb->args[3] = afi->family;
 +					goto done;
 +				}
 +cont:
 +				idx++;
 +			}
 +			if (s_idx)
 +				s_idx = 0;
 +		}
 +	}
 +	cb->args[1] = 1;
 +done:
 +	return skb->len;
 +}
 +
  static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
  {
  	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@@ -2204,12 -2155,9 +2216,12 @@@
  	if (err < 0)
  		return err;
  
 -	if (ctx.table == NULL)
 -		ret = nf_tables_dump_sets_all(&ctx, skb, cb);
 -	else
 +	if (ctx.table == NULL) {
 +		if (ctx.afi == NULL)
 +			ret = nf_tables_dump_sets_all(&ctx, skb, cb);
 +		else
 +			ret = nf_tables_dump_sets_family(&ctx, skb, cb);
 +	} else
  		ret = nf_tables_dump_sets_table(&ctx, skb, cb);
  
  	return ret;
@@@ -2222,7 -2170,6 +2234,7 @@@ static int nf_tables_getset(struct soc
  	const struct nft_set *set;
  	struct nft_ctx ctx;
  	struct sk_buff *skb2;
 +	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
  	int err;
  
  	/* Verify existence before starting dump */
@@@ -2237,10 -2184,6 +2249,10 @@@
  		return netlink_dump_start(nlsk, skb, nlh, &c);
  	}
  
 +	/* Only accept unspec with dump */
 +	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
 +		return -EAFNOSUPPORT;
 +
  	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
  	if (IS_ERR(set))
  		return PTR_ERR(set);
@@@ -2410,7 -2353,6 +2422,7 @@@ static int nf_tables_delset(struct soc
  			    const struct nlmsghdr *nlh,
  			    const struct nlattr * const nla[])
  {
 +	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
  	struct nft_set *set;
  	struct nft_ctx ctx;
  	int err;
@@@ -2422,9 -2364,6 +2434,9 @@@
  	if (err < 0)
  		return err;
  
 +	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
 +		return -EAFNOSUPPORT;
 +
  	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
  	if (IS_ERR(set))
  		return PTR_ERR(set);
@@@ -2443,7 -2382,9 +2455,9 @@@ static int nf_tables_bind_check_setelem
  	enum nft_registers dreg;
  
  	dreg = nft_type_to_reg(set->dtype);
- 	return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+ 	return nft_validate_data_load(ctx, dreg, &elem->data,
+ 				      set->dtype == NFT_DATA_VERDICT ?
+ 				      NFT_DATA_VERDICT : NFT_DATA_VALUE);
  }
  
  int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@@ -2594,8 -2535,9 +2608,8 @@@ static int nf_tables_dump_set(struct sk
  	u32 portid, seq;
  	int event, err;
  
 -	nfmsg = nlmsg_data(cb->nlh);
 -	err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
 -			  nft_set_elem_list_policy);
 +	err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
 +			  NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy);
  	if (err < 0)
  		return err;
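
A note on the resume machinery the reworked set dumps above rely on: a
netlink dump callback is re-invoked until it reports completion, and the
cb->args[] scratch slots are the conventional place to keep the cursor
between invocations. The new code uses args[0] for the set index, args[1]
as a done flag, args[2] for the table pointer and args[3] for the address
family; nf_tables_dump_sets_family() also resets idx to 0 per table, which
fixes the stale index visible in the removed lines. A minimal, illustrative
sketch of the convention (the attribute type and loop bound are made up,
not taken from the patch):

	#include <net/netlink.h>

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		unsigned int idx, s_idx = cb->args[0];	/* resume point */

		if (cb->args[1])	/* a previous pass already finished */
			return skb->len;

		for (idx = 0; idx < 16; idx++) {
			if (idx < s_idx)
				continue;	/* emitted on an earlier pass */
			if (nla_put_u32(skb, 1 /* attr type */, idx) < 0) {
				cb->args[0] = idx;	/* skb full, resume here */
				return skb->len;
			}
		}
		cb->args[1] = 1;	/* mark the dump complete */
		return skb->len;
	}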
  
diff --combined net/netfilter/nfnetlink_log.c
index 7d4254b,a155d19..d292c8d
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@@ -28,6 -28,8 +28,6 @@@
  #include <linux/proc_fs.h>
  #include <linux/security.h>
  #include <linux/list.h>
 -#include <linux/jhash.h>
 -#include <linux/random.h>
  #include <linux/slab.h>
  #include <net/sock.h>
  #include <net/netfilter/nf_log.h>
@@@ -73,6 -75,7 +73,6 @@@ struct nfulnl_instance 
  };
  
  #define INSTANCE_BUCKETS	16
 -static unsigned int hash_init;
  
  static int nfnl_log_net_id __read_mostly;
  
@@@ -1050,6 -1053,7 +1050,7 @@@ static void __net_exit nfnl_log_net_exi
  #ifdef CONFIG_PROC_FS
  	remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
  #endif
+ 	nf_log_unset(net, &nfulnl_logger);
  }
  
  static struct pernet_operations nfnl_log_net_ops = {
@@@ -1063,6 -1067,11 +1064,6 @@@ static int __init nfnetlink_log_init(vo
  {
  	int status = -ENOMEM;
  
 -	/* it's not really all that important to have a random value, so
 -	 * we can do this from the init function, even if there hasn't
 -	 * been that much entropy yet */
 -	get_random_bytes(&hash_init, sizeof(hash_init));
 -
  	netlink_register_notifier(&nfulnl_rtnl_notifier);
  	status = nfnetlink_subsys_register(&nfulnl_subsys);
  	if (status < 0) {
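
The single added line in nfnl_log_net_exit() above pairs per-netns teardown
with the per-netns logger binding: without it, a namespace could exit with
its log slots still pointing at nfulnl_logger while the instance state is
torn down here. A hedged sketch of the pairing (the .id/.size values are
assumed from the file's conventions, not shown in this diff):

	static struct pernet_operations nfnl_log_net_ops = {
		.init = nfnl_log_net_init,	/* per-netns setup (proc entry) */
		.exit = nfnl_log_net_exit,	/* teardown, now + nf_log_unset() */
		.id   = &nfnl_log_net_id,
		.size = sizeof(struct nfnl_log_net),
	};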
diff --combined net/rose/af_rose.c
index 81f94b1,62ced65..d080eb4
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@@ -1012,7 -1012,7 +1012,7 @@@ int rose_rx_call_request(struct sk_buf
  	make_rose->source_call   = facilities.source_call;
  	make_rose->source_ndigis = facilities.source_ndigis;
  	for (n = 0 ; n < facilities.source_ndigis ; n++)
 -		make_rose->source_digis[n]= facilities.source_digis[n];
 +		make_rose->source_digis[n] = facilities.source_digis[n];
  	make_rose->neighbour     = neigh;
  	make_rose->device        = dev;
  	make_rose->facilities    = facilities;
@@@ -1253,6 -1253,7 +1253,7 @@@ static int rose_recvmsg(struct kiocb *i
  
  	if (msg->msg_name) {
  		struct sockaddr_rose *srose;
+ 		struct full_sockaddr_rose *full_srose = msg->msg_name;
  
  		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
  		srose = msg->msg_name;
@@@ -1260,18 -1261,9 +1261,9 @@@
  		srose->srose_addr   = rose->dest_addr;
  		srose->srose_call   = rose->dest_call;
  		srose->srose_ndigis = rose->dest_ndigis;
- 		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- 			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- 			for (n = 0 ; n < rose->dest_ndigis ; n++)
- 				full_srose->srose_digis[n] = rose->dest_digis[n];
- 			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- 		} else {
- 			if (rose->dest_ndigis >= 1) {
- 				srose->srose_ndigis = 1;
- 				srose->srose_digi = rose->dest_digis[0];
- 			}
- 			msg->msg_namelen = sizeof(struct sockaddr_rose);
- 		}
+ 		for (n = 0 ; n < rose->dest_ndigis ; n++)
+ 			full_srose->srose_digis[n] = rose->dest_digis[n];
+ 		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
  	}
  
  	skb_free_datagram(sk, skb);
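
The af_rose hunk above drops the short sockaddr_rose fallback and always
fills the full structure; the truncated branch keyed off msg_namelen, which
callers no longer pass in meaningfully, so it was effectively dead code.
The shape worth noting, condensed from the new code (comments added here):

	if (msg->msg_name) {
		struct full_sockaddr_rose *full_srose = msg->msg_name;

		/* zero everything first so unused digipeater slots
		 * cannot leak kernel stack bytes to userspace */
		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
		/* ... fill address, call sign and digipeaters ... */
		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
	}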
diff --combined net/sched/act_csum.c
index 9cc6717,11fe1a4..8b1d657
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@@ -37,8 -37,15 +37,8 @@@
  #include <net/tc_act/tc_csum.h>
  
  #define CSUM_TAB_MASK 15
 -static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
  static u32 csum_idx_gen;
 -static DEFINE_RWLOCK(csum_lock);
 -
 -static struct tcf_hashinfo csum_hash_info = {
 -	.htab	= tcf_csum_ht,
 -	.hmask	= CSUM_TAB_MASK,
 -	.lock	= &csum_lock,
 -};
 +static struct tcf_hashinfo csum_hash_info;
  
  static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
  	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@@ -70,16 -77,16 +70,16 @@@ static int tcf_csum_init(struct net *n
  				     &csum_idx_gen, &csum_hash_info);
  		if (IS_ERR(pc))
  			return PTR_ERR(pc);
- 		p = to_tcf_csum(pc);
  		ret = ACT_P_CREATED;
  	} else {
- 		p = to_tcf_csum(pc);
- 		if (!ovr) {
- 			tcf_hash_release(pc, bind, &csum_hash_info);
+ 		if (bind) /* don't override defaults */
+ 			return 0;
+ 		tcf_hash_release(pc, bind, &csum_hash_info);
+ 		if (!ovr)
  			return -EEXIST;
- 		}
  	}
  
+ 	p = to_tcf_csum(pc);
  	spin_lock_bh(&p->tcf_lock);
  	p->tcf_action = parm->action;
  	p->update_flags = parm->update_flags;
@@@ -586,10 -593,6 +586,10 @@@ MODULE_LICENSE("GPL")
  
  static int __init csum_init_module(void)
  {
 +	int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
 +	if (err)
 +		return err;
 +
  	return tcf_register_action(&act_csum_ops);
  }
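
The act_csum hunks above establish a pattern repeated nearly verbatim in
the act_gact, act_ipt, act_nat, act_pedit, act_police, act_simple and
act_skbedit diffs that follow: the static hash table plus rwlock becomes a
tcf_hashinfo initialized at module load and destroyed at unload, and the
already-exists branch is reordered so a bind request returns success early
instead of tripping over -EEXIST. A condensed sketch of the resulting
module shape (names are illustrative; the unwind-on-failure form appears
in act_police and act_simple):

	#define EXAMPLE_TAB_MASK	15
	static struct tcf_hashinfo example_hash_info;

	static int __init example_init_module(void)
	{
		int err = tcf_hashinfo_init(&example_hash_info, EXAMPLE_TAB_MASK);

		if (err)
			return err;
		err = tcf_register_action(&act_example_ops);
		if (err)
			tcf_hashinfo_destroy(&example_hash_info);	/* unwind */
		return err;
	}

	static void __exit example_cleanup_module(void)
	{
		tcf_unregister_action(&act_example_ops);
		tcf_hashinfo_destroy(&example_hash_info);
	}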
  
diff --combined net/sched/act_gact.c
index dea9273,eb9ba60..af5641c
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@@ -24,8 -24,15 +24,8 @@@
  #include <net/tc_act/tc_gact.h>
  
  #define GACT_TAB_MASK	15
 -static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1];
  static u32 gact_idx_gen;
 -static DEFINE_RWLOCK(gact_lock);
 -
 -static struct tcf_hashinfo gact_hash_info = {
 -	.htab	=	tcf_gact_ht,
 -	.hmask	=	GACT_TAB_MASK,
 -	.lock	=	&gact_lock,
 -};
 +static struct tcf_hashinfo gact_hash_info;
  
  #ifdef CONFIG_GACT_PROB
  static int gact_net_rand(struct tcf_gact *gact)
@@@ -95,10 -102,11 +95,11 @@@ static int tcf_gact_init(struct net *ne
  			return PTR_ERR(pc);
  		ret = ACT_P_CREATED;
  	} else {
- 		if (!ovr) {
- 			tcf_hash_release(pc, bind, &gact_hash_info);
+ 		if (bind) /* don't override defaults */
+ 			return 0;
+ 		tcf_hash_release(pc, bind, &gact_hash_info);
+ 		if (!ovr)
  			return -EEXIST;
- 		}
  	}
  
  	gact = to_gact(pc);
@@@ -208,9 -216,6 +209,9 @@@ MODULE_LICENSE("GPL")
  
  static int __init gact_init_module(void)
  {
 +	int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
 +	if (err)
 +		return err;
  #ifdef CONFIG_GACT_PROB
  	pr_info("GACT probability on\n");
  #else
@@@ -222,7 -227,6 +223,7 @@@
  static void __exit gact_cleanup_module(void)
  {
  	tcf_unregister_action(&act_gact_ops);
 +	tcf_hashinfo_destroy(&gact_hash_info);
  }
  
  module_init(gact_init_module);
diff --combined net/sched/act_ipt.c
index e13ecbb,dcbfe8c..2426369
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@@ -29,8 -29,15 +29,8 @@@
  
  
  #define IPT_TAB_MASK     15
 -static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
  static u32 ipt_idx_gen;
 -static DEFINE_RWLOCK(ipt_lock);
 -
 -static struct tcf_hashinfo ipt_hash_info = {
 -	.htab	=	tcf_ipt_ht,
 -	.hmask	=	IPT_TAB_MASK,
 -	.lock	=	&ipt_lock,
 -};
 +static struct tcf_hashinfo ipt_hash_info;
  
  static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
  {
@@@ -134,10 -141,12 +134,12 @@@ static int tcf_ipt_init(struct net *net
  			return PTR_ERR(pc);
  		ret = ACT_P_CREATED;
  	} else {
- 		if (!ovr) {
- 			tcf_ipt_release(to_ipt(pc), bind);
+ 		if (bind) /* don't override defaults */
+ 			return 0;
+ 		tcf_ipt_release(to_ipt(pc), bind);
+ 
+ 		if (!ovr)
  			return -EEXIST;
- 		}
  	}
  	ipt = to_ipt(pc);
  
@@@ -313,11 -322,7 +315,11 @@@ MODULE_ALIAS("act_xt")
  
  static int __init ipt_init_module(void)
  {
 -	int ret1, ret2;
 +	int ret1, ret2, err;
 +	err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
 +	if (err)
 +		return err;
 +
  	ret1 = tcf_register_action(&act_xt_ops);
  	if (ret1 < 0)
  		printk("Failed to load xt action\n");
@@@ -325,10 -330,9 +327,10 @@@
  	if (ret2 < 0)
  		printk("Failed to load ipt action\n");
  
 -	if (ret1 < 0 && ret2 < 0)
 +	if (ret1 < 0 && ret2 < 0) {
 +		tcf_hashinfo_destroy(&ipt_hash_info);
  		return ret1;
 -	else
 +	} else
  		return 0;
  }
  
@@@ -336,7 -340,6 +338,7 @@@ static void __exit ipt_cleanup_module(v
  {
  	tcf_unregister_action(&act_xt_ops);
  	tcf_unregister_action(&act_ipt_ops);
 +	tcf_hashinfo_destroy(&ipt_hash_info);
  }
  
  module_init(ipt_init_module);
diff --combined net/sched/act_nat.c
index 921fea4,7686953..584e655
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@@ -30,9 -30,15 +30,9 @@@
  
  
  #define NAT_TAB_MASK	15
 -static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
  static u32 nat_idx_gen;
 -static DEFINE_RWLOCK(nat_lock);
  
 -static struct tcf_hashinfo nat_hash_info = {
 -	.htab	=	tcf_nat_ht,
 -	.hmask	=	NAT_TAB_MASK,
 -	.lock	=	&nat_lock,
 -};
 +static struct tcf_hashinfo nat_hash_info;
  
  static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
  	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
@@@ -64,15 -70,15 +64,15 @@@ static int tcf_nat_init(struct net *net
  				     &nat_idx_gen, &nat_hash_info);
  		if (IS_ERR(pc))
  			return PTR_ERR(pc);
- 		p = to_tcf_nat(pc);
  		ret = ACT_P_CREATED;
  	} else {
- 		p = to_tcf_nat(pc);
- 		if (!ovr) {
- 			tcf_hash_release(pc, bind, &nat_hash_info);
+ 		if (bind)
+ 			return 0;
+ 		tcf_hash_release(pc, bind, &nat_hash_info);
+ 		if (!ovr)
  			return -EEXIST;
- 		}
  	}
+ 	p = to_tcf_nat(pc);
  
  	spin_lock_bh(&p->tcf_lock);
  	p->old_addr = parm->old_addr;
@@@ -310,16 -316,12 +310,16 @@@ MODULE_LICENSE("GPL")
  
  static int __init nat_init_module(void)
  {
 +	int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
 +	if (err)
 +		return err;
  	return tcf_register_action(&act_nat_ops);
  }
  
  static void __exit nat_cleanup_module(void)
  {
  	tcf_unregister_action(&act_nat_ops);
 +	tcf_hashinfo_destroy(&nat_hash_info);
  }
  
  module_init(nat_init_module);
diff --combined net/sched/act_pedit.c
index e2520e9,7aa2dcd..7291893
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@@ -24,9 -24,15 +24,9 @@@
  #include <net/tc_act/tc_pedit.h>
  
  #define PEDIT_TAB_MASK	15
 -static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1];
  static u32 pedit_idx_gen;
 -static DEFINE_RWLOCK(pedit_lock);
  
 -static struct tcf_hashinfo pedit_hash_info = {
 -	.htab	=	tcf_pedit_ht,
 -	.hmask	=	PEDIT_TAB_MASK,
 -	.lock	=	&pedit_lock,
 -};
 +static struct tcf_hashinfo pedit_hash_info;
  
  static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
  	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tc_pedit) },
@@@ -78,10 -84,12 +78,12 @@@ static int tcf_pedit_init(struct net *n
  		ret = ACT_P_CREATED;
  	} else {
  		p = to_pedit(pc);
- 		if (!ovr) {
- 			tcf_hash_release(pc, bind, &pedit_hash_info);
+ 		tcf_hash_release(pc, bind, &pedit_hash_info);
+ 		if (bind)
+ 			return 0;
+ 		if (!ovr)
  			return -EEXIST;
- 		}
+ 
  		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
  			keys = kmalloc(ksize, GFP_KERNEL);
  			if (keys == NULL)
@@@ -246,15 -254,11 +248,15 @@@ MODULE_LICENSE("GPL")
  
  static int __init pedit_init_module(void)
  {
 +	int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
 +	if (err)
 +		return err;
  	return tcf_register_action(&act_pedit_ops);
  }
  
  static void __exit pedit_cleanup_module(void)
  {
 +	tcf_hashinfo_destroy(&pedit_hash_info);
  	tcf_unregister_action(&act_pedit_ops);
  }
  
diff --combined net/sched/act_police.c
index 819a9a4,ef246d8..9295b86
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@@ -41,8 -41,15 +41,8 @@@ struct tcf_police 
  	container_of(pc, struct tcf_police, common)
  
  #define POL_TAB_MASK     15
 -static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
  static u32 police_idx_gen;
 -static DEFINE_RWLOCK(police_lock);
 -
 -static struct tcf_hashinfo police_hash_info = {
 -	.htab	=	tcf_police_ht,
 -	.hmask	=	POL_TAB_MASK,
 -	.lock	=	&police_lock,
 -};
 +static struct tcf_hashinfo police_hash_info;
  
  /* old policer structure from before tc actions */
  struct tc_police_compat {
@@@ -60,19 -67,18 +60,19 @@@
  static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
  			      int type, struct tc_action *a)
  {
 +	struct hlist_head *head;
  	struct tcf_common *p;
  	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
  	struct nlattr *nest;
  
 -	read_lock_bh(&police_lock);
 +	spin_lock_bh(&police_hash_info.lock);
  
  	s_i = cb->args[0];
  
  	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
 -		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
 +		head = &police_hash_info.htab[tcf_hash(i, POL_TAB_MASK)];
  
 -		for (; p; p = p->tcfc_next) {
 +		hlist_for_each_entry_rcu(p, head, tcfc_head) {
  			index++;
  			if (index < s_i)
  				continue;
@@@ -95,7 -101,7 +95,7 @@@
  		}
  	}
  done:
 -	read_unlock_bh(&police_lock);
 +	spin_unlock_bh(&police_hash_info.lock);
  	if (n_i)
  		cb->args[0] += n_i;
  	return n_i;
@@@ -107,16 -113,25 +107,16 @@@ nla_put_failure
  
  static void tcf_police_destroy(struct tcf_police *p)
  {
 -	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
 -	struct tcf_common **p1p;
 -
 -	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
 -		if (*p1p == &p->common) {
 -			write_lock_bh(&police_lock);
 -			*p1p = p->tcf_next;
 -			write_unlock_bh(&police_lock);
 -			gen_kill_estimator(&p->tcf_bstats,
 -					   &p->tcf_rate_est);
 -			/*
 -			 * gen_estimator est_timer() might access p->tcf_lock
 -			 * or bstats, wait a RCU grace period before freeing p
 -			 */
 -			kfree_rcu(p, tcf_rcu);
 -			return;
 -		}
 -	}
 -	WARN_ON(1);
 +	spin_lock_bh(&police_hash_info.lock);
 +	hlist_del(&p->tcf_head);
 +	spin_unlock_bh(&police_hash_info.lock);
 +	gen_kill_estimator(&p->tcf_bstats,
 +			   &p->tcf_rate_est);
 +	/*
 +	 * gen_estimator est_timer() might access p->tcf_lock
 +	 * or bstats, wait a RCU grace period before freeing p
 +	 */
 +	kfree_rcu(p, tcf_rcu);
  }
  
  static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@@ -162,10 -177,12 +162,12 @@@ static int tcf_act_police_locate(struc
  			if (bind) {
  				police->tcf_bindcnt += 1;
  				police->tcf_refcnt += 1;
+ 				return 0;
  			}
  			if (ovr)
  				goto override;
- 			return ret;
+ 			/* not replacing */
+ 			return -EEXIST;
  		}
  	}
  
@@@ -251,9 -268,10 +253,9 @@@ override
  	police->tcf_index = parm->index ? parm->index :
  		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
  	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 -	write_lock_bh(&police_lock);
 -	police->tcf_next = tcf_police_ht[h];
 -	tcf_police_ht[h] = &police->common;
 -	write_unlock_bh(&police_lock);
 +	spin_lock_bh(&police_hash_info.lock);
 +	hlist_add_head(&police->tcf_head, &police_hash_info.htab[h]);
 +	spin_unlock_bh(&police_hash_info.lock);
  
  	a->priv = police;
  	return ret;
@@@ -261,8 -279,10 +263,8 @@@
  failure_unlock:
  	spin_unlock_bh(&police->tcf_lock);
  failure:
 -	if (P_tab)
 -		qdisc_put_rtab(P_tab);
 -	if (R_tab)
 -		qdisc_put_rtab(R_tab);
 +	qdisc_put_rtab(P_tab);
 +	qdisc_put_rtab(R_tab);
  	if (ret == ACT_P_CREATED)
  		kfree(police);
  	return err;
@@@ -396,19 -416,12 +398,19 @@@ static struct tc_action_ops act_police_
  static int __init
  police_init_module(void)
  {
 -	return tcf_register_action(&act_police_ops);
 +	int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
 +	if (err)
 +		return err;
 +	err = tcf_register_action(&act_police_ops);
 +	if (err)
 +		tcf_hashinfo_destroy(&police_hash_info);
 +	return err;
  }
  
  static void __exit
  police_cleanup_module(void)
  {
 +	tcf_hashinfo_destroy(&police_hash_info);
  	tcf_unregister_action(&act_police_ops);
  }
  
diff --combined net/sched/act_simple.c
index 81aebc1,f7b45ab..b44491e
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@@ -25,8 -25,15 +25,8 @@@
  #include <net/tc_act/tc_defact.h>
  
  #define SIMP_TAB_MASK     7
 -static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1];
  static u32 simp_idx_gen;
 -static DEFINE_RWLOCK(simp_lock);
 -
 -static struct tcf_hashinfo simp_hash_info = {
 -	.htab	=	tcf_simp_ht,
 -	.hmask	=	SIMP_TAB_MASK,
 -	.lock	=	&simp_lock,
 -};
 +static struct tcf_hashinfo simp_hash_info;
  
  #define SIMP_MAX_DATA	32
  static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@@ -135,10 -142,13 +135,13 @@@ static int tcf_simp_init(struct net *ne
  		ret = ACT_P_CREATED;
  	} else {
  		d = to_defact(pc);
- 		if (!ovr) {
- 			tcf_simp_release(d, bind);
+ 
+ 		if (bind)
+ 			return 0;
+ 		tcf_simp_release(d, bind);
+ 		if (!ovr)
  			return -EEXIST;
- 		}
+ 
  		reset_policy(d, defdata, parm);
  	}
  
@@@ -202,23 -212,14 +205,23 @@@ MODULE_LICENSE("GPL")
  
  static int __init simp_init_module(void)
  {
 -	int ret = tcf_register_action(&act_simp_ops);
 +	int err, ret;
 +	err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
 +	if (err)
 +		return err;
 +
 +	ret = tcf_register_action(&act_simp_ops);
  	if (!ret)
  		pr_info("Simple TC action Loaded\n");
 +	else
 +		tcf_hashinfo_destroy(&simp_hash_info);
 +
  	return ret;
  }
  
  static void __exit simp_cleanup_module(void)
  {
 +	tcf_hashinfo_destroy(&simp_hash_info);
  	tcf_unregister_action(&act_simp_ops);
  }
  
diff --combined net/sched/act_skbedit.c
index aa0a4c0,8fe9d25..0fa1aad
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@@ -11,7 -11,8 +11,7 @@@
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 - * Place - Suite 330, Boston, MA 02111-1307 USA.
 + * this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * Author: Alexander Duyck <alexander.h.duyck at intel.com>
   */
@@@ -28,8 -29,15 +28,8 @@@
  #include <net/tc_act/tc_skbedit.h>
  
  #define SKBEDIT_TAB_MASK     15
 -static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
  static u32 skbedit_idx_gen;
 -static DEFINE_RWLOCK(skbedit_lock);
 -
 -static struct tcf_hashinfo skbedit_hash_info = {
 -	.htab	=	tcf_skbedit_ht,
 -	.hmask	=	SKBEDIT_TAB_MASK,
 -	.lock	=	&skbedit_lock,
 -};
 +static struct tcf_hashinfo skbedit_hash_info;
  
  static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
  		       struct tcf_result *res)
@@@ -112,10 -120,11 +112,11 @@@ static int tcf_skbedit_init(struct net 
  		ret = ACT_P_CREATED;
  	} else {
  		d = to_skbedit(pc);
- 		if (!ovr) {
- 			tcf_hash_release(pc, bind, &skbedit_hash_info);
+ 		if (bind)
+ 			return 0;
+ 		tcf_hash_release(pc, bind, &skbedit_hash_info);
+ 		if (!ovr)
  			return -EEXIST;
- 		}
  	}
  
  	spin_lock_bh(&d->tcf_lock);
@@@ -203,15 -212,11 +204,15 @@@ MODULE_LICENSE("GPL")
  
  static int __init skbedit_init_module(void)
  {
 +	int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
 +	if (err)
 +		return err;
  	return tcf_register_action(&act_skbedit_ops);
  }
  
  static void __exit skbedit_cleanup_module(void)
  {
 +	tcf_hashinfo_destroy(&skbedit_hash_info);
  	tcf_unregister_action(&act_skbedit_ops);
  }
  
diff --combined net/sctp/outqueue.c
index 111516c,59268f6..9c77947
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@@ -22,8 -22,9 +22,8 @@@
   * See the GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
 - * along with GNU CC; see the file COPYING.  If not, write to
 - * the Free Software Foundation, 59 Temple Place - Suite 330,
 - * Boston, MA 02111-1307, USA.
 + * along with GNU CC; see the file COPYING.  If not, see
 + * <http://www.gnu.org/licenses/>.
   *
   * Please send any bug reports or fixes you make to the
   * email address(es):
@@@ -110,7 -111,7 +110,7 @@@ static inline int sctp_cacc_skip_3_1_d(
  				       struct sctp_transport *transport,
  				       int count_of_newacks)
  {
 -	if (count_of_newacks >=2 && transport != primary)
 +	if (count_of_newacks >= 2 && transport != primary)
  		return 1;
  	return 0;
  }
@@@ -207,8 -208,6 +207,6 @@@ void sctp_outq_init(struct sctp_associa
  	INIT_LIST_HEAD(&q->retransmit);
  	INIT_LIST_HEAD(&q->sacked);
  	INIT_LIST_HEAD(&q->abandoned);
- 
- 	q->empty = 1;
  }
  
  /* Free the outqueue structure and any related pending chunks.
@@@ -331,7 -330,6 +329,6 @@@ int sctp_outq_tail(struct sctp_outq *q
  				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
  			else
  				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- 			q->empty = 0;
  			break;
  		}
  	} else {
@@@ -470,7 -468,7 +467,7 @@@ void sctp_retransmit(struct sctp_outq *
  	struct net *net = sock_net(q->asoc->base.sk);
  	int error = 0;
  
 -	switch(reason) {
 +	switch (reason) {
  	case SCTP_RTXR_T3_RTX:
  		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
  		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
@@@ -653,7 -651,6 +650,6 @@@ redo
  			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
  				chunk->fast_retransmit = SCTP_DONT_FRTX;
  
- 			q->empty = 0;
  			q->asoc->stats.rtxchunks++;
  			break;
  		}
@@@ -1064,8 -1061,6 +1060,6 @@@ static int sctp_outq_flush(struct sctp_
  
  			sctp_transport_reset_timers(transport);
  
- 			q->empty = 0;
- 
  			/* Only let one DATA chunk get bundled with a
  			 * COOKIE-ECHO chunk.
  			 */
@@@ -1088,7 -1083,7 +1082,7 @@@ sctp_flush_out
  	 *
  	 * --xguo
  	 */
 -	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
 +	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
  		struct sctp_transport *t = list_entry(ltransport,
  						      struct sctp_transport,
  						      send_ready);
@@@ -1217,7 -1212,7 +1211,7 @@@ int sctp_outq_sack(struct sctp_outq *q
  		 * destinations for which cacc_saw_newack is set.
  		 */
  		if (transport->cacc.cacc_saw_newack)
 -			count_of_newacks ++;
 +			count_of_newacks++;
  	}
  
  	/* Move the Cumulative TSN Ack Point if appropriate.  */
@@@ -1274,29 -1269,17 +1268,17 @@@
  		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
  		 asoc->adv_peer_ack_point);
  
- 	/* See if all chunks are acked.
- 	 * Make sure the empty queue handler will get run later.
- 	 */
- 	q->empty = (list_empty(&q->out_chunk_list) &&
- 		    list_empty(&q->retransmit));
- 	if (!q->empty)
- 		goto finish;
- 
- 	list_for_each_entry(transport, transport_list, transports) {
- 		q->empty = q->empty && list_empty(&transport->transmitted);
- 		if (!q->empty)
- 			goto finish;
- 	}
- 
- 	pr_debug("%s: sack queue is empty\n", __func__);
- finish:
- 	return q->empty;
+ 	return sctp_outq_is_empty(q);
  }
  
- /* Is the outqueue empty?  */
+ /* Is the outqueue empty?
+  * The queue is empty when there is no pending data, no data in flight
+  * and nothing pending retransmission.
+  */
  int sctp_outq_is_empty(const struct sctp_outq *q)
  {
- 	return q->empty;
+ 	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+ 	       list_empty(&q->retransmit);
  }
  
  /********************************************************************
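
The sctp change above is a correctness fix rather than a cleanup: the
cached q->empty flag was set and cleared in several places (see the
removed lines) and could go stale, so emptiness is now derived from state
that cannot disagree with the queues themselves. Restated as a sketch with
the three conditions annotated:

	/* equivalent to the new sctp_outq_is_empty() */
	static bool example_outq_empty(const struct sctp_outq *q)
	{
		return q->out_qlen == 0 &&		/* nothing queued     */
		       q->outstanding_bytes == 0 &&	/* nothing in flight  */
		       list_empty(&q->retransmit);	/* nothing to re-send */
	}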
diff --combined net/tipc/port.c
index 5fd4c8c,d43f318..b742b26
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@@ -251,18 -251,15 +251,15 @@@ struct tipc_port *tipc_createport(struc
  	return p_ptr;
  }
  
- int tipc_deleteport(u32 ref)
+ int tipc_deleteport(struct tipc_port *p_ptr)
  {
- 	struct tipc_port *p_ptr;
  	struct sk_buff *buf = NULL;
  
- 	tipc_withdraw(ref, 0, NULL);
- 	p_ptr = tipc_port_lock(ref);
- 	if (!p_ptr)
- 		return -EINVAL;
+ 	tipc_withdraw(p_ptr, 0, NULL);
  
- 	tipc_ref_discard(ref);
- 	tipc_port_unlock(p_ptr);
+ 	spin_lock_bh(p_ptr->lock);
+ 	tipc_ref_discard(p_ptr->ref);
+ 	spin_unlock_bh(p_ptr->lock);
  
  	k_cancel_timer(&p_ptr->timer);
  	if (p_ptr->connected) {
@@@ -704,47 -701,36 +701,36 @@@ int tipc_set_portimportance(u32 ref, un
  }
  
  
- int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+ int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ 		 struct tipc_name_seq const *seq)
  {
- 	struct tipc_port *p_ptr;
  	struct publication *publ;
  	u32 key;
- 	int res = -EINVAL;
  
- 	p_ptr = tipc_port_lock(ref);
- 	if (!p_ptr)
+ 	if (p_ptr->connected)
  		return -EINVAL;
+ 	key = p_ptr->ref + p_ptr->pub_count + 1;
+ 	if (key == p_ptr->ref)
+ 		return -EADDRINUSE;
  
- 	if (p_ptr->connected)
- 		goto exit;
- 	key = ref + p_ptr->pub_count + 1;
- 	if (key == ref) {
- 		res = -EADDRINUSE;
- 		goto exit;
- 	}
  	publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
  				    scope, p_ptr->ref, key);
  	if (publ) {
  		list_add(&publ->pport_list, &p_ptr->publications);
  		p_ptr->pub_count++;
  		p_ptr->published = 1;
- 		res = 0;
+ 		return 0;
  	}
- exit:
- 	tipc_port_unlock(p_ptr);
- 	return res;
+ 	return -EINVAL;
  }
  
- int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+ int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ 		  struct tipc_name_seq const *seq)
  {
- 	struct tipc_port *p_ptr;
  	struct publication *publ;
  	struct publication *tpubl;
  	int res = -EINVAL;
  
- 	p_ptr = tipc_port_lock(ref);
- 	if (!p_ptr)
- 		return -EINVAL;
  	if (!seq) {
  		list_for_each_entry_safe(publ, tpubl,
  					 &p_ptr->publications, pport_list) {
@@@ -771,7 -757,6 +757,6 @@@
  	}
  	if (list_empty(&p_ptr->publications))
  		p_ptr->published = 0;
- 	tipc_port_unlock(p_ptr);
  	return res;
  }
  
@@@ -832,14 -817,17 +817,14 @@@ exit
   */
  int __tipc_disconnect(struct tipc_port *tp_ptr)
  {
 -	int res;
 -
  	if (tp_ptr->connected) {
  		tp_ptr->connected = 0;
  		/* let timer expire on its own to avoid deadlock! */
  		tipc_nodesub_unsubscribe(&tp_ptr->subscription);
 -		res = 0;
 -	} else {
 -		res = -ENOTCONN;
 +		return 0;
  	}
 -	return res;
 +
 +	return -ENOTCONN;
  }
  
  /*
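
The tipc/port.c changes above move lookup and locking out of
tipc_publish(), tipc_withdraw() and tipc_deleteport(): each now takes a
struct tipc_port * and trusts the caller to serialize access, which is why
the socket diff that follows wraps bind() in lock_sock()/release_sock().
Condensed sketch of the new calling convention, as bind() below uses it
(error paths trimmed):

	lock_sock(sk);			/* caller now owns serialization */
	res = (addr->scope > 0) ?
		tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
		tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
	release_sock(sk);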
diff --combined net/tipc/socket.c
index 5efdeef,e741416..c8341d1
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@@ -239,6 -239,7 +239,6 @@@ static int tipc_sk_create(struct net *n
  int tipc_sock_create_local(int type, struct socket **res)
  {
  	int rc;
 -	struct sock *sk;
  
  	rc = sock_create_lite(AF_TIPC, type, 0, res);
  	if (rc < 0) {
@@@ -247,6 -248,8 +247,6 @@@
  	}
  	tipc_sk_create(&init_net, *res, 0, 1);
  
 -	sk = (*res)->sk;
 -
  	return 0;
  }
  
@@@ -351,7 -354,7 +351,7 @@@ static int release(struct socket *sock
  	 * Delete TIPC port; this ensures no more messages are queued
  	 * (also disconnects an active connection & sends a 'FIN-' to peer)
  	 */
- 	res = tipc_deleteport(tport->ref);
+ 	res = tipc_deleteport(tport);
  
  	/* Discard any remaining (connection-based) messages in receive queue */
  	__skb_queue_purge(&sk->sk_receive_queue);
@@@ -383,30 -386,46 +383,46 @@@
   */
  static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
  {
+ 	struct sock *sk = sock->sk;
  	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- 	u32 portref = tipc_sk_port(sock->sk)->ref;
+ 	struct tipc_port *tport = tipc_sk_port(sock->sk);
+ 	int res = -EINVAL;
  
- 	if (unlikely(!uaddr_len))
- 		return tipc_withdraw(portref, 0, NULL);
+ 	lock_sock(sk);
+ 	if (unlikely(!uaddr_len)) {
+ 		res = tipc_withdraw(tport, 0, NULL);
+ 		goto exit;
+ 	}
  
- 	if (uaddr_len < sizeof(struct sockaddr_tipc))
- 		return -EINVAL;
- 	if (addr->family != AF_TIPC)
- 		return -EAFNOSUPPORT;
+ 	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ 		res = -EINVAL;
+ 		goto exit;
+ 	}
+ 	if (addr->family != AF_TIPC) {
+ 		res = -EAFNOSUPPORT;
+ 		goto exit;
+ 	}
  
  	if (addr->addrtype == TIPC_ADDR_NAME)
  		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- 	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
- 		return -EAFNOSUPPORT;
+ 	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ 		res = -EAFNOSUPPORT;
+ 		goto exit;
+ 	}
  
  	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
  	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- 	    (addr->addr.nameseq.type != TIPC_CFG_SRV))
- 		return -EACCES;
+ 	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+ 		res = -EACCES;
+ 		goto exit;
+ 	}
  
- 	return (addr->scope > 0) ?
- 		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
- 		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+ 	res = (addr->scope > 0) ?
+ 		tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+ 		tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+ exit:
+ 	release_sock(sk);
+ 	return res;
  }
  
  /**
@@@ -751,11 -770,16 +767,11 @@@ static int send_stream(struct kiocb *io
  
  	/* Handle special cases where there is no connection */
  	if (unlikely(sock->state != SS_CONNECTED)) {
 -		if (sock->state == SS_UNCONNECTED) {
 +		if (sock->state == SS_UNCONNECTED)
  			res = send_packet(NULL, sock, m, total_len);
 -			goto exit;
 -		} else if (sock->state == SS_DISCONNECTING) {
 -			res = -EPIPE;
 -			goto exit;
 -		} else {
 -			res = -ENOTCONN;
 -			goto exit;
 -		}
 +		else
 +			res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
 +		goto exit;
  	}
  
  	if (unlikely(m->msg_name)) {
@@@ -1303,12 -1327,14 +1319,12 @@@ static u32 filter_connect(struct tipc_s
  static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
  {
  	struct tipc_msg *msg = buf_msg(buf);
 -	unsigned int limit;
  
  	if (msg_connected(msg))
 -		limit = sysctl_tipc_rmem[2];
 -	else
 -		limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
 -			msg_importance(msg);
 -	return limit;
 +		return sysctl_tipc_rmem[2];
 +
 +	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
 +		msg_importance(msg);
  }
  
  /**
@@@ -1504,12 -1530,14 +1520,12 @@@ static int connect(struct socket *sock
  				sock->state != SS_CONNECTING,
  				timeout ? (long)msecs_to_jiffies(timeout)
  					: MAX_SCHEDULE_TIMEOUT);
  		if (res <= 0) {
  			if (res == 0)
  				res = -ETIMEDOUT;
 -			else
 -				; /* leave "res" unchanged */
 -			goto exit;
 +			return res;
  		}
 +		lock_sock(sk);
  	}
  
  	if (unlikely(sock->state == SS_DISCONNECTING))
