[linux-next] LinuxNextTracking branch, master, updated. next-20130430

batman at open-mesh.org
Wed May 1 00:22:08 CEST 2013


The following commit has been merged in the master branch:
commit dba554cc481ab6582202dd954f927e7657ee453a
Merge: 28b109bee38abe21a35dd2860f7a76b2aa1c53b1 b807a3d688c7b1da86662b220ace548389c48167
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Tue Apr 30 12:24:04 2013 +1000

    Merge remote-tracking branch 'net-next/master'
    
    Conflicts:
    	drivers/infiniband/hw/cxgb4/qp.c
    	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
    	drivers/net/ethernet/emulex/benet/be.h
    	include/linux/pci.h
    	include/net/tcp.h
    	net/mac802154/mac802154.h

diff --combined MAINTAINERS
index ba01c5e,cae1f8e..c9a47e9
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1031,7 -1031,6 +1031,7 @@@ F:	drivers/mmc/host/msm_sdcc.
  F:	drivers/tty/serial/msm_serial.h
  F:	drivers/tty/serial/msm_serial.c
  F:	drivers/*/pm8???-*
 +F:	drivers/ssbi/
  F:	include/linux/mfd/pm8xxx/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
  S:	Maintained
@@@ -1765,7 -1764,7 +1765,7 @@@ F:	arch/arm/configs/bcm2835_defconfi
  F:	drivers/*/*bcm2835*
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
- M:	Matt Carlson <mcarlson at broadcom.com>
+ M:	Nithin Nayak Sujir <nsujir at broadcom.com>
  M:	Michael Chan <mchan at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
@@@ -1887,7 -1886,7 +1887,7 @@@ F:	Documentation/video4linux/cafe_cci
  F:	drivers/media/platform/marvell-ccic/
  
  CAIF NETWORK LAYER
- M:	Sjur Braendeland <sjur.brandeland at stericsson.com>
+ M:	Dmitry Tarnyagin <dmitry.tarnyagin at lockless.no>
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	Documentation/networking/caif/
@@@ -2201,34 -2200,12 +2201,34 @@@ F:	drivers/net/ethernet/ti/cpmac.
  
  CPU FREQUENCY DRIVERS
  M:	Rafael J. Wysocki <rjw at sisk.pl>
 +M:	Viresh Kumar <viresh.kumar at linaro.org>
  L:	cpufreq at vger.kernel.org
  L:	linux-pm at vger.kernel.org
  S:	Maintained
 +T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
  F:	drivers/cpufreq/
  F:	include/linux/cpufreq.h
  
 +CPU FREQUENCY DRIVERS - ARM BIG LITTLE
 +M:	Viresh Kumar <viresh.kumar at linaro.org>
 +M:	Sudeep KarkadaNagesha <sudeep.karkadanagesha at arm.com>
 +L:	cpufreq at vger.kernel.org
 +L:	linux-pm at vger.kernel.org
 +W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
 +S:	Maintained
 +F:	drivers/cpufreq/arm_big_little.h
 +F:	drivers/cpufreq/arm_big_little.c
 +F:	drivers/cpufreq/arm_big_little_dt.c
 +
 +CPUIDLE DRIVERS
 +M:	Rafael J. Wysocki <rjw at sisk.pl>
 +M:	Daniel Lezcano <daniel.lezcano at linaro.org>
 +L:	linux-pm at vger.kernel.org
 +S:	Maintained
 +T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 +F:	drivers/cpuidle/*
 +F:	include/linux/cpuidle.h
 +
  CPUID/MSR DRIVER
  M:	"H. Peter Anvin" <hpa at zytor.com>
  S:	Maintained
@@@ -2307,7 -2284,7 +2307,7 @@@ L:	linux-media at vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  W:	http://linuxtv.org
  S:	Maintained
 -F:	drivers/media/i2c/cx2341x*
 +F:	drivers/media/common/cx2341x*
  F:	include/media/cx2341x*
  
  CX88 VIDEO4LINUX DRIVER
@@@ -2390,16 -2367,6 +2390,16 @@@ W:	http://www.cyclades.com
  S:	Orphan
  F:	drivers/net/wan/pc300*
  
 +CYPRESS_FIRMWARE MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/common/cypress_firmware*
 +
  CYTTSP TOUCHSCREEN DRIVER
  M:	Javier Martinez Canillas <javier at dowhile0.org>
  L:	linux-input at vger.kernel.org
@@@ -2491,12 -2458,6 +2491,12 @@@ M:	Matthew Garrett <mjg59 at srcf.ucam.org
  S:	Maintained
  F:	drivers/platform/x86/dell-wmi.c
  
 +DESIGNWARE USB2 DRD IP DRIVER
 +M:	Paul Zimmerman <paulz at synopsys.com>
 +L:	linux-usb at vger.kernel.org
 +S:	Maintained
 +F:	drivers/staging/dwc2/
 +
  DESIGNWARE USB3 DRD IP DRIVER
  M:	Felipe Balbi <balbi at ti.com>
  L:	linux-usb at vger.kernel.org
@@@ -2770,7 -2731,7 +2770,7 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/usb/dvb-usb/cxusb*
  
 -DVB_USB_CYPRESS_FIRMWARE MEDIA DRIVER
 +DVB_USB_EC168 MEDIA DRIVER
  M:	Antti Palosaari <crope at iki.fi>
  L:	linux-media at vger.kernel.org
  W:	http://linuxtv.org/
@@@ -2778,16 -2739,17 +2778,16 @@@ W:	http://palosaari.fi/linux
  Q:	http://patchwork.linuxtv.org/project/linux-media/list/
  T:	git git://linuxtv.org/anttip/media_tree.git
  S:	Maintained
 -F:	drivers/media/usb/dvb-usb-v2/cypress_firmware*
 +F:	drivers/media/usb/dvb-usb-v2/ec168*
  
 -DVB_USB_EC168 MEDIA DRIVER
 +DVB_USB_GL861 MEDIA DRIVER
  M:	Antti Palosaari <crope at iki.fi>
  L:	linux-media at vger.kernel.org
  W:	http://linuxtv.org/
  Q:	http://patchwork.linuxtv.org/project/linux-media/list/
  T:	git git://linuxtv.org/anttip/media_tree.git
  S:	Maintained
 -F:	drivers/media/usb/dvb-usb-v2/ec168*
 +F:	drivers/media/usb/dvb-usb-v2/gl861*
  
  DVB_USB_MXL111SF MEDIA DRIVER
  M:	Michael Krufky <mkrufky at linuxtv.org>
@@@ -3631,14 -3593,6 +3631,14 @@@ W:	http://www.kernel.org/pub/linux/kern
  S:	Maintained
  F:	drivers/platform/x86/hdaps.c
  
 +HDPVR USB VIDEO ENCODER DRIVER
 +M:	Hans Verkuil <hverkuil at xs4all.nl>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +W:	http://linuxtv.org
 +S:	Odd Fixes
 +F:	drivers/media/usb/hdpvr
 +
  HWPOISON MEMORY FAILURE HANDLING
  M:	Andi Kleen <andi at firstfloor.org>
  L:	linux-mm at kvack.org
@@@ -3910,6 -3864,7 +3910,6 @@@ F:	drivers/i2c/i2c-stub.
  
  I2C SUBSYSTEM
  M:	Wolfram Sang <wsa at the-dreams.de>
 -M:	"Ben Dooks (embedded platforms)" <ben-linux at fluff.org>
  L:	linux-i2c at vger.kernel.org
  W:	http://i2c.wiki.kernel.org/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
@@@ -4467,16 -4422,6 +4467,16 @@@ Q:	http://patchwork.linuxtv.org/project
  S:	Maintained
  F:	drivers/media/dvb-frontends/it913x-fe*
  
 +IT913X MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/tuners/it913x*
 +
  IVTV VIDEO4LINUX DRIVER
  M:	Andy Walls <awalls at md.metrocast.net>
  L:	ivtv-devel at ivtvdriver.org (moderated for non-subscribers)
@@@ -5467,13 -5412,6 +5467,13 @@@ L:	linux-scsi at vger.kernel.or
  S:	Maintained
  F:	drivers/scsi/NCR_D700.*
  
 +NCT6775 HARDWARE MONITOR DRIVER
 +M:	Guenter Roeck <linux at roeck-us.net>
 +L:	lm-sensors at lm-sensors.org
 +S:	Maintained
 +F:	Documentation/hwmon/nct6775
 +F:	drivers/hwmon/nct6775.c
 +
  NETEFFECT IWARP RNIC DRIVER (IW_NES)
  M:	Faisal Latif <faisal.latif at intel.com>
  L:	linux-rdma at vger.kernel.org
@@@ -6260,7 -6198,7 +6260,7 @@@ S:	Supporte
  F:	drivers/scsi/pmcraid.*
  
  PMC SIERRA PM8001 DRIVER
 -M:	jack_wang at usish.com
 +M:	xjtuwjp at gmail.com
  M:	lindar_liu at usish.com
  L:	linux-scsi at vger.kernel.org
  S:	Supported
@@@ -6398,6 -6336,7 +6398,7 @@@ F:	drivers/acpi/apei/erst.
  
  PTP HARDWARE CLOCK SUPPORT
  M:	Richard Cochran <richardcochran at gmail.com>
+ L:	netdev at vger.kernel.org
  S:	Maintained
  W:	http://linuxptp.sourceforge.net/
  F:	Documentation/ABI/testing/sysfs-ptp
@@@ -6529,6 -6468,7 +6530,7 @@@ S:	Supporte
  F:	drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
+ M:	Shahed Shaikh <shahed.shaikh at qlogic.com>
  M:	Jitendra Kalsaria <jitendra.kalsaria at qlogic.com>
  M:	Ron Mercer <ron.mercer at qlogic.com>
  M:	linux-driver at qlogic.com
@@@ -6754,16 -6694,6 +6756,16 @@@ T:	git git://linuxtv.org/anttip/media_t
  S:	Maintained
  F:	drivers/media/dvb-frontends/rtl2830*
  
 +RTL2832 MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/dvb-frontends/rtl2832*
 +
  RTL8180 WIRELESS DRIVER
  M:	"John W. Linville" <linville at tuxdriver.com>
  L:	linux-wireless at vger.kernel.org
@@@ -6866,7 -6796,7 +6868,7 @@@ L:	linux-media at vger.kernel.or
  W:	http://linuxtv.org
  T:	git git://linuxtv.org/media_tree.git
  S:	Odd fixes
 -F:	Documentation/video4linux/saa7134/
 +F:	Documentation/video4linux/*.saa7134
  F:	drivers/media/pci/saa7134/
  
  SAA7146 VIDEO4LINUX-2 DRIVER
@@@ -6959,8 -6889,9 +6961,8 @@@ F:	drivers/clocksourc
  
  TLG2300 VIDEO4LINUX-2 DRIVER
  M:	Huang Shijie <shijie8 at gmail.com>
 -M:	Kang Yong <kangyong at telegent.com>
 -M:	Zhang Xiaobing <xbzhang at telegent.com>
 -S:	Supported
 +M:	Hans Verkuil <hverkuil at xs4all.nl>
 +S:	Odd Fixes
  F:	drivers/media/usb/tlg2300
  
  SC1200 WDT DRIVER
@@@ -7206,43 -7137,17 +7208,43 @@@ F:	drivers/media/radio/si470x/radio-si4
  F:	drivers/media/radio/si470x/radio-si470x.h
  F:	drivers/media/radio/si470x/radio-si470x-usb.c
  
 +SI4713 FM RADIO TRANSMITTER I2C DRIVER
 +M:	Eduardo Valentin <edubezval at gmail.com>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +W:	http://linuxtv.org
 +S:	Odd Fixes
 +F:	drivers/media/radio/si4713-i2c.?
 +
 +SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER
 +M:	Eduardo Valentin <edubezval at gmail.com>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +W:	http://linuxtv.org
 +S:	Odd Fixes
 +F:	drivers/media/radio/radio-si4713.h
 +
 +SIANO DVB DRIVER
 +M:	Mauro Carvalho Chehab <mchehab at redhat.com>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Odd fixes
 +F:	drivers/media/common/siano/
 +F:	drivers/media/dvb/siano/
 +F:	drivers/media/usb/siano/
 +F:	drivers/media/mmc/siano
 +
  SH_VEU V4L2 MEM2MEM DRIVER
  M:	Guennadi Liakhovetski <g.liakhovetski at gmx.de>
  L:	linux-media at vger.kernel.org
  S:	Maintained
  F:	drivers/media/platform/sh_veu.c
 -F:	include/media/sh_veu.h
  
  SH_VOU V4L2 OUTPUT DRIVER
  M:	Guennadi Liakhovetski <g.liakhovetski at gmx.de>
  L:	linux-media at vger.kernel.org
 -S:	Maintained
 +S:	Odd Fixes
  F:	drivers/media/platform/sh_vou.c
  F:	include/media/sh_vou.h
  
@@@ -7284,13 -7189,14 +7286,13 @@@ F:	arch/arm/mach-davinc
  F:	drivers/i2c/busses/i2c-davinci.c
  
  TI DAVINCI SERIES MEDIA DRIVER
 -M:	Manjunath Hadli <manjunath.hadli at ti.com>
 -M:	Prabhakar Lad <prabhakar.lad at ti.com>
 +M:	Lad, Prabhakar <prabhakar.csengg at gmail.com>
  L:	linux-media at vger.kernel.org
  L:	davinci-linux-open-source at linux.davincidsp.com (moderated for non-subscribers)
  W:	http://linuxtv.org/
  Q:	http://patchwork.linuxtv.org/project/linux-media/list/
  T:	git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
 -S:	Supported
 +S:	Maintained
  F:	drivers/media/platform/davinci/
  F:	include/media/davinci/
  
@@@ -7662,11 -7568,6 +7664,11 @@@ M:	David Täht <d at teklibre.com
  S:	Odd Fixes
  F:	drivers/staging/frontier/
  
 +STAGING - GO7007 MPEG CODEC
 +M:	Hans Verkuil <hans.verkuil at cisco.com>
 +S:	Maintained
 +F:	drivers/staging/media/go7007/
 +
  STAGING - INDUSTRIAL IO
  M:	Jonathan Cameron <jic23 at cam.ac.uk>
  L:	linux-iio at vger.kernel.org
@@@ -7717,8 -7618,8 +7719,8 @@@ S:	Odd Fixe
  F:	drivers/staging/sm7xxfb/
  
  STAGING - SOFTLOGIC 6x10 MPEG CODEC
 -M:	Ben Collins <bcollins at bluecherry.net>
 -S:	Odd Fixes
 +M:	Ismael Luceno <ismael.luceno at corp.bluecherry.net>
 +S:	Supported
  F:	drivers/staging/media/solo6x10/
  
  STAGING - SPEAKUP CONSOLE SPEECH DRIVER
@@@ -8005,14 -7906,11 +8007,14 @@@ F:	arch/xtensa
  
  THERMAL
  M:      Zhang Rui <rui.zhang at intel.com>
 +M:      Eduardo Valentin <eduardo.valentin at ti.com>
  L:      linux-pm at vger.kernel.org
  T:      git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
 +Q:      https://patchwork.kernel.org/project/linux-pm/list/
  S:      Supported
  F:      drivers/thermal/
  F:      include/linux/thermal.h
 +F:      include/linux/cpu_cooling.h
  
  THINGM BLINK(1) USB RGB LED DRIVER
  M:	Vivien Didelot <vivien.didelot at savoirfairelinux.com>
@@@ -8029,12 -7927,6 +8031,12 @@@ T:	git git://repo.or.cz/linux-2.6/linux
  S:	Maintained
  F:	drivers/platform/x86/thinkpad_acpi.c
  
 +TI BANDGAP AND THERMAL DRIVER
 +M:	Eduardo Valentin <eduardo.valentin at ti.com>
 +L:	linux-pm at vger.kernel.org
 +S:	Maintained
 +F:	drivers/staging/omap-thermal/
 +
  TI FLASH MEDIA INTERFACE DRIVER
  M:	Alex Dubov <oakad at yahoo.com>
  S:	Maintained
@@@ -8630,7 -8522,7 +8632,7 @@@ F:	drivers/usb/gadget/*uvc*.
  F:	drivers/usb/gadget/webcam.c
  
  USB WIRELESS RNDIS DRIVER (rndis_wlan)
- M:	Jussi Kivilinna <jussi.kivilinna at mbnet.fi>
+ M:	Jussi Kivilinna <jussi.kivilinna at iki.fi>
  L:	linux-wireless at vger.kernel.org
  S:	Maintained
  F:	drivers/net/wireless/rndis_wlan.c
diff --combined drivers/firewire/net.c
index 75c1133,4d56536..815b0fc
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@@ -1,5 -1,6 +1,6 @@@
  /*
   * IPv4 over IEEE 1394, per RFC 2734
+  * IPv6 over IEEE 1394, per RFC 3146
   *
   * Copyright (C) 2009 Jay Fenlason <fenlason at redhat.com>
   *
@@@ -28,6 -29,7 +29,7 @@@
  
  #include <asm/unaligned.h>
  #include <net/arp.h>
+ #include <net/firewire.h>
  
  /* rx limits */
  #define FWNET_MAX_FRAGMENTS		30 /* arbitrary, > TX queue depth */
@@@ -45,6 -47,7 +47,7 @@@
  
  #define IANA_SPECIFIER_ID		0x00005eU
  #define RFC2734_SW_VERSION		0x000001U
+ #define RFC3146_SW_VERSION		0x000002U
  
  #define IEEE1394_GASP_HDR_SIZE	8
  
@@@ -57,32 -60,10 +60,10 @@@
  #define RFC2374_HDR_LASTFRAG	2	/* last fragment	*/
  #define RFC2374_HDR_INTFRAG	3	/* interior fragment	*/
  
- #define RFC2734_HW_ADDR_LEN	16
- 
- struct rfc2734_arp {
- 	__be16 hw_type;		/* 0x0018	*/
- 	__be16 proto_type;	/* 0x0806       */
- 	u8 hw_addr_len;		/* 16		*/
- 	u8 ip_addr_len;		/* 4		*/
- 	__be16 opcode;		/* ARP Opcode	*/
- 	/* Above is exactly the same format as struct arphdr */
- 
- 	__be64 s_uniq_id;	/* Sender's 64bit EUI			*/
- 	u8 max_rec;		/* Sender's max packet size		*/
- 	u8 sspd;		/* Sender's max speed			*/
- 	__be16 fifo_hi;		/* hi 16bits of sender's FIFO addr	*/
- 	__be32 fifo_lo;		/* lo 32bits of sender's FIFO addr	*/
- 	__be32 sip;		/* Sender's IP Address			*/
- 	__be32 tip;		/* IP Address of requested hw addr	*/
- } __packed;
- 
- /* This header format is specific to this driver implementation. */
- #define FWNET_ALEN	8
- #define FWNET_HLEN	10
- struct fwnet_header {
- 	u8 h_dest[FWNET_ALEN];	/* destination address */
- 	__be16 h_proto;		/* packet type ID field */
- } __packed;
+ static bool fwnet_hwaddr_is_multicast(u8 *ha)
+ {
+ 	return !!(*ha & 1);
+ }
  
  /* IPv4 and IPv6 encapsulation header */
  struct rfc2734_header {
@@@ -191,8 -172,6 +172,6 @@@ struct fwnet_peer 
  	struct list_head peer_link;
  	struct fwnet_device *dev;
  	u64 guid;
- 	u64 fifo;
- 	__be32 ip;
  
  	/* guarded by dev->lock */
  	struct list_head pd_list; /* received partial datagrams */
@@@ -222,6 -201,15 +201,15 @@@ struct fwnet_packet_task 
  };
  
  /*
+  * Get fifo address embedded in hwaddr
+  */
+ static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
+ {
+ 	return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
+ 	       | get_unaligned_be32(&ha->uc.fifo_lo);
+ }
+ 
+ /*
   * saddr == NULL means use device source address.
   * daddr == NULL means leave destination address (eg unresolved arp).
   */
@@@ -368,8 -356,10 +356,8 @@@ static struct fwnet_fragment_info *fwne
  	}
  
  	new = kmalloc(sizeof(*new), GFP_ATOMIC);
 -	if (!new) {
 -		dev_err(&pd->skb->dev->dev, "out of memory\n");
 +	if (!new)
  		return NULL;
 -	}
  
  	new->offset = offset;
  	new->len = len;
@@@ -412,6 -402,8 +400,6 @@@ fail_w_fi
  fail_w_new:
  	kfree(new);
  fail:
 -	dev_err(&net->dev, "out of memory\n");
 -
  	return NULL;
  }
  
@@@ -509,10 -501,20 +497,20 @@@ static int fwnet_finish_incoming_packet
  					bool is_broadcast, u16 ether_type)
  {
  	struct fwnet_device *dev;
  	int status;
  	__be64 guid;
  
+ 	switch (ether_type) {
+ 	case ETH_P_ARP:
+ 	case ETH_P_IP:
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	case ETH_P_IPV6:
+ #endif
+ 		break;
+ 	default:
+ 		goto err;
+ 	}
+ 
  	dev = netdev_priv(net);
  	/* Write metadata, and then pass to the receive level */
  	skb->dev = net;
@@@ -520,92 -522,11 +518,11 @@@
  
  	/*
  	 * Parse the encapsulation header. This actually does the job of
- 	 * converting to an ethernet frame header, as well as arp
- 	 * conversion if needed. ARP conversion is easier in this
- 	 * direction, since we are using ethernet as our backend.
+ 	 * converting to an ethernet-like pseudo frame header.
  	 */
- 	/*
- 	 * If this is an ARP packet, convert it. First, we want to make
- 	 * use of some of the fields, since they tell us a little bit
- 	 * about the sending machine.
- 	 */
- 	if (ether_type == ETH_P_ARP) {
- 		struct rfc2734_arp *arp1394;
- 		struct arphdr *arp;
- 		unsigned char *arp_ptr;
- 		u64 fifo_addr;
- 		u64 peer_guid;
- 		unsigned sspd;
- 		u16 max_payload;
- 		struct fwnet_peer *peer;
- 		unsigned long flags;
- 
- 		arp1394   = (struct rfc2734_arp *)skb->data;
- 		arp       = (struct arphdr *)skb->data;
- 		arp_ptr   = (unsigned char *)(arp + 1);
- 		peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
- 		fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
- 				| get_unaligned_be32(&arp1394->fifo_lo);
- 
- 		sspd = arp1394->sspd;
- 		/* Sanity check.  OS X 10.3 PPC reportedly sends 131. */
- 		if (sspd > SCODE_3200) {
- 			dev_notice(&net->dev, "sspd %x out of range\n", sspd);
- 			sspd = SCODE_3200;
- 		}
- 		max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
- 
- 		spin_lock_irqsave(&dev->lock, flags);
- 		peer = fwnet_peer_find_by_guid(dev, peer_guid);
- 		if (peer) {
- 			peer->fifo = fifo_addr;
- 
- 			if (peer->speed > sspd)
- 				peer->speed = sspd;
- 			if (peer->max_payload > max_payload)
- 				peer->max_payload = max_payload;
- 
- 			peer->ip = arp1394->sip;
- 		}
- 		spin_unlock_irqrestore(&dev->lock, flags);
- 
- 		if (!peer) {
- 			dev_notice(&net->dev,
- 				   "no peer for ARP packet from %016llx\n",
- 				   (unsigned long long)peer_guid);
- 			goto no_peer;
- 		}
- 
- 		/*
- 		 * Now that we're done with the 1394 specific stuff, we'll
- 		 * need to alter some of the data.  Believe it or not, all
- 		 * that needs to be done is sender_IP_address needs to be
- 		 * moved, the destination hardware address get stuffed
- 		 * in and the hardware address length set to 8.
- 		 *
- 		 * IMPORTANT: The code below overwrites 1394 specific data
- 		 * needed above so keep the munging of the data for the
- 		 * higher level IP stack last.
- 		 */
- 
- 		arp->ar_hln = 8;
- 		/* skip over sender unique id */
- 		arp_ptr += arp->ar_hln;
- 		/* move sender IP addr */
- 		put_unaligned(arp1394->sip, (u32 *)arp_ptr);
- 		/* skip over sender IP addr */
- 		arp_ptr += arp->ar_pln;
- 
- 		if (arp->ar_op == htons(ARPOP_REQUEST))
- 			memset(arp_ptr, 0, sizeof(u64));
- 		else
- 			memcpy(arp_ptr, net->dev_addr, sizeof(u64));
- 	}
- 
- 	/* Now add the ethernet header. */
  	guid = cpu_to_be64(dev->card->guid);
  	if (dev_hard_header(skb, net, ether_type,
- 			   is_broadcast ? &broadcast_hw : &guid,
+ 			   is_broadcast ? net->broadcast : net->dev_addr,
  			   NULL, skb->len) >= 0) {
  		struct fwnet_header *eth;
  		u16 *rawp;
@@@ -614,7 -535,7 +531,7 @@@
  		skb_reset_mac_header(skb);
  		skb_pull(skb, sizeof(*eth));
  		eth = (struct fwnet_header *)skb_mac_header(skb);
- 		if (*eth->h_dest & 1) {
+ 		if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
  			if (memcmp(eth->h_dest, net->broadcast,
  				   net->addr_len) == 0)
  				skb->pkt_type = PACKET_BROADCAST;
@@@ -626,7 -547,7 +543,7 @@@
  			if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
  				skb->pkt_type = PACKET_OTHERHOST;
  		}
- 		if (ntohs(eth->h_proto) >= 1536) {
+ 		if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
  			protocol = eth->h_proto;
  		} else {
  			rawp = (u16 *)skb->data;
@@@ -648,7 -569,7 +565,7 @@@
  
  	return 0;
  
-  no_peer:
+  err:
  	net->stats.rx_errors++;
  	net->stats.rx_dropped++;
  
@@@ -688,6 -609,7 +605,6 @@@ static int fwnet_incoming_packet(struc
  
  		skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
  		if (unlikely(!skb)) {
 -			dev_err(&net->dev, "out of memory\n");
  			net->stats.rx_dropped++;
  
  			return -ENOMEM;
@@@ -851,7 -773,12 +768,12 @@@ static void fwnet_receive_broadcast(str
  	ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
  	source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
  
- 	if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ 	if (specifier_id == IANA_SPECIFIER_ID &&
+ 	    (ver == RFC2734_SW_VERSION
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	     || ver == RFC3146_SW_VERSION
+ #endif
+ 	    )) {
  		buf_ptr += 2;
  		length -= IEEE1394_GASP_HDR_SIZE;
  		fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
@@@ -1054,16 -981,27 +976,27 @@@ static int fwnet_send_packet(struct fwn
  		u8 *p;
  		int generation;
  		int node_id;
+ 		unsigned int sw_version;
  
  		/* ptask->generation may not have been set yet */
  		generation = dev->card->generation;
  		smp_rmb();
  		node_id = dev->card->node_id;
  
+ 		switch (ptask->skb->protocol) {
+ 		default:
+ 			sw_version = RFC2734_SW_VERSION;
+ 			break;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		case htons(ETH_P_IPV6):
+ 			sw_version = RFC3146_SW_VERSION;
+ #endif
+ 		}
+ 
  		p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
  		put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
  		put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
- 						| RFC2734_SW_VERSION, &p[4]);
+ 						| sw_version, &p[4]);
  
  		/* We should not transmit if broadcast_channel.valid == 0. */
  		fw_send_request(dev->card, &ptask->transaction,
@@@ -1111,6 -1049,62 +1044,62 @@@
  	return 0;
  }
  
+ static void fwnet_fifo_stop(struct fwnet_device *dev)
+ {
+ 	if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
+ 		return;
+ 
+ 	fw_core_remove_address_handler(&dev->handler);
+ 	dev->local_fifo = FWNET_NO_FIFO_ADDR;
+ }
+ 
+ static int fwnet_fifo_start(struct fwnet_device *dev)
+ {
+ 	int retval;
+ 
+ 	if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ 		return 0;
+ 
+ 	dev->handler.length = 4096;
+ 	dev->handler.address_callback = fwnet_receive_packet;
+ 	dev->handler.callback_data = dev;
+ 
+ 	retval = fw_core_add_address_handler(&dev->handler,
+ 					     &fw_high_memory_region);
+ 	if (retval < 0)
+ 		return retval;
+ 
+ 	dev->local_fifo = dev->handler.offset;
+ 
+ 	return 0;
+ }
+ 
+ static void __fwnet_broadcast_stop(struct fwnet_device *dev)
+ {
+ 	unsigned u;
+ 
+ 	if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
+ 		for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
+ 			kunmap(dev->broadcast_rcv_buffer.pages[u]);
+ 		fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ 	}
+ 	if (dev->broadcast_rcv_context) {
+ 		fw_iso_context_destroy(dev->broadcast_rcv_context);
+ 		dev->broadcast_rcv_context = NULL;
+ 	}
+ 	kfree(dev->broadcast_rcv_buffer_ptrs);
+ 	dev->broadcast_rcv_buffer_ptrs = NULL;
+ 	dev->broadcast_state = FWNET_BROADCAST_ERROR;
+ }
+ 
+ static void fwnet_broadcast_stop(struct fwnet_device *dev)
+ {
+ 	if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
+ 		return;
+ 	fw_iso_context_stop(dev->broadcast_rcv_context);
+ 	__fwnet_broadcast_stop(dev);
+ }
+ 
  static int fwnet_broadcast_start(struct fwnet_device *dev)
  {
  	struct fw_iso_context *context;
@@@ -1119,60 -1113,47 +1108,47 @@@
  	unsigned max_receive;
  	struct fw_iso_packet packet;
  	unsigned long offset;
+ 	void **ptrptr;
  	unsigned u;
  
- 	if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- 		dev->handler.length = 4096;
- 		dev->handler.address_callback = fwnet_receive_packet;
- 		dev->handler.callback_data = dev;
- 
- 		retval = fw_core_add_address_handler(&dev->handler,
- 					&fw_high_memory_region);
- 		if (retval < 0)
- 			goto failed_initial;
- 
- 		dev->local_fifo = dev->handler.offset;
- 	}
+ 	if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
+ 		return 0;
  
  	max_receive = 1U << (dev->card->max_receive + 1);
  	num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
  
- 	if (!dev->broadcast_rcv_context) {
- 		void **ptrptr;
- 
- 		context = fw_iso_context_create(dev->card,
- 		    FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
- 		    dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
- 		if (IS_ERR(context)) {
- 			retval = PTR_ERR(context);
- 			goto failed_context_create;
- 		}
+ 	ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ 	if (!ptrptr) {
+ 		retval = -ENOMEM;
+ 		goto failed;
+ 	}
+ 	dev->broadcast_rcv_buffer_ptrs = ptrptr;
+ 
+ 	context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
+ 					IEEE1394_BROADCAST_CHANNEL,
+ 					dev->card->link_speed, 8,
+ 					fwnet_receive_broadcast, dev);
+ 	if (IS_ERR(context)) {
+ 		retval = PTR_ERR(context);
+ 		goto failed;
+ 	}
  
- 		retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
- 		    dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
- 		if (retval < 0)
- 			goto failed_buffer_init;
+ 	retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
+ 				    FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ 	if (retval < 0)
+ 		goto failed;
  
- 		ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
- 		if (!ptrptr) {
- 			retval = -ENOMEM;
- 			goto failed_ptrs_alloc;
- 		}
+ 	dev->broadcast_state = FWNET_BROADCAST_STOPPED;
  
- 		dev->broadcast_rcv_buffer_ptrs = ptrptr;
- 		for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
- 			void *ptr;
- 			unsigned v;
+ 	for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ 		void *ptr;
+ 		unsigned v;
  
- 			ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
- 			for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
- 				*ptrptr++ = (void *)
- 						((char *)ptr + v * max_receive);
- 		}
- 		dev->broadcast_rcv_context = context;
- 	} else {
- 		context = dev->broadcast_rcv_context;
+ 		ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ 		for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ 			*ptrptr++ = (void *) ((char *)ptr + v * max_receive);
  	}
+ 	dev->broadcast_rcv_context = context;
  
  	packet.payload_length = max_receive;
  	packet.interrupt = 1;
@@@ -1186,7 -1167,7 +1162,7 @@@
  		retval = fw_iso_context_queue(context, &packet,
  				&dev->broadcast_rcv_buffer, offset);
  		if (retval < 0)
- 			goto failed_rcv_queue;
+ 			goto failed;
  
  		offset += max_receive;
  	}
@@@ -1196,7 -1177,7 +1172,7 @@@
  	retval = fw_iso_context_start(context, -1, 0,
  			FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
  	if (retval < 0)
- 		goto failed_rcv_queue;
+ 		goto failed;
  
  	/* FIXME: adjust it according to the min. speed of all known peers? */
  	dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
@@@ -1205,19 -1186,8 +1181,8 @@@
  
  	return 0;
  
-  failed_rcv_queue:
- 	kfree(dev->broadcast_rcv_buffer_ptrs);
- 	dev->broadcast_rcv_buffer_ptrs = NULL;
-  failed_ptrs_alloc:
- 	fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
-  failed_buffer_init:
- 	fw_iso_context_destroy(context);
- 	dev->broadcast_rcv_context = NULL;
-  failed_context_create:
- 	fw_core_remove_address_handler(&dev->handler);
-  failed_initial:
- 	dev->local_fifo = FWNET_NO_FIFO_ADDR;
- 
+  failed:
+ 	__fwnet_broadcast_stop(dev);
  	return retval;
  }
  
@@@ -1235,11 -1205,10 +1200,10 @@@ static int fwnet_open(struct net_devic
  	struct fwnet_device *dev = netdev_priv(net);
  	int ret;
  
- 	if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
- 		ret = fwnet_broadcast_start(dev);
- 		if (ret)
- 			return ret;
- 	}
+ 	ret = fwnet_broadcast_start(dev);
+ 	if (ret)
+ 		return ret;
+ 
  	netif_start_queue(net);
  
  	spin_lock_irq(&dev->lock);
@@@ -1252,9 -1221,10 +1216,10 @@@
  /* ifdown */
  static int fwnet_stop(struct net_device *net)
  {
- 	netif_stop_queue(net);
+ 	struct fwnet_device *dev = netdev_priv(net);
  
- 	/* Deallocate iso context for use by other applications? */
+ 	netif_stop_queue(net);
+ 	fwnet_broadcast_stop(dev);
  
  	return 0;
  }
@@@ -1294,19 -1264,27 +1259,27 @@@ static netdev_tx_t fwnet_tx(struct sk_b
  	 * We might need to rebuild the header on tx failure.
  	 */
  	memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
- 	skb_pull(skb, sizeof(hdr_buf));
- 
  	proto = hdr_buf.h_proto;
+ 
+ 	switch (proto) {
+ 	case htons(ETH_P_ARP):
+ 	case htons(ETH_P_IP):
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	case htons(ETH_P_IPV6):
+ #endif
+ 		break;
+ 	default:
+ 		goto fail;
+ 	}
+ 
+ 	skb_pull(skb, sizeof(hdr_buf));
  	dg_size = skb->len;
  
  	/*
  	 * Set the transmission type for the packet.  ARP packets and IP
  	 * broadcast packets are sent via GASP.
  	 */
- 	if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
- 	    || proto == htons(ETH_P_ARP)
- 	    || (proto == htons(ETH_P_IP)
- 		&& IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ 	if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
  		max_payload        = dev->broadcast_xmt_max_payload;
  		datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
  
@@@ -1315,11 -1293,12 +1288,12 @@@
  		ptask->dest_node   = IEEE1394_ALL_NODES;
  		ptask->speed       = SCODE_100;
  	} else {
- 		__be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ 		union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
+ 		__be64 guid = get_unaligned(&ha->uc.uniq_id);
  		u8 generation;
  
  		peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
- 		if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ 		if (!peer)
  			goto fail;
  
  		generation         = peer->generation;
@@@ -1327,32 -1306,12 +1301,12 @@@
  		max_payload        = peer->max_payload;
  		datagram_label_ptr = &peer->datagram_label;
  
- 		ptask->fifo_addr   = peer->fifo;
+ 		ptask->fifo_addr   = fwnet_hwaddr_fifo(ha);
  		ptask->generation  = generation;
  		ptask->dest_node   = dest_node;
  		ptask->speed       = peer->speed;
  	}
  
- 	/* If this is an ARP packet, convert it */
- 	if (proto == htons(ETH_P_ARP)) {
- 		struct arphdr *arp = (struct arphdr *)skb->data;
- 		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- 		struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
- 		__be32 ipaddr;
- 
- 		ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
- 
- 		arp1394->hw_addr_len    = RFC2734_HW_ADDR_LEN;
- 		arp1394->max_rec        = dev->card->max_receive;
- 		arp1394->sspd		= dev->card->link_speed;
- 
- 		put_unaligned_be16(dev->local_fifo >> 32,
- 				   &arp1394->fifo_hi);
- 		put_unaligned_be32(dev->local_fifo & 0xffffffff,
- 				   &arp1394->fifo_lo);
- 		put_unaligned(ipaddr, &arp1394->sip);
- 	}
- 
  	ptask->hdr.w0 = 0;
  	ptask->hdr.w1 = 0;
  	ptask->skb = skb;
@@@ -1467,8 -1426,6 +1421,6 @@@ static int fwnet_add_peer(struct fwnet_
  
  	peer->dev = dev;
  	peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
- 	peer->fifo = FWNET_NO_FIFO_ADDR;
- 	peer->ip = 0;
  	INIT_LIST_HEAD(&peer->pd_list);
  	peer->pdg_size = 0;
  	peer->datagram_label = 0;
@@@ -1498,6 -1455,7 +1450,7 @@@ static int fwnet_probe(struct device *_
  	struct fwnet_device *dev;
  	unsigned max_mtu;
  	int ret;
+ 	union fwnet_hwaddr *ha;
  
  	mutex_lock(&fwnet_device_mutex);
  
@@@ -1528,6 -1486,11 +1481,11 @@@
  	dev->card = card;
  	dev->netdev = net;
  
+ 	ret = fwnet_fifo_start(dev);
+ 	if (ret < 0)
+ 		goto out;
+ 	dev->local_fifo = dev->handler.offset;
+ 
  	/*
  	 * Use the RFC 2734 default 1500 octets or the maximum payload
  	 * as initial MTU
@@@ -1537,24 -1500,31 +1495,31 @@@
  	net->mtu = min(1500U, max_mtu);
  
  	/* Set our hardware address while we're at it */
- 	put_unaligned_be64(card->guid, net->dev_addr);
- 	put_unaligned_be64(~0ULL, net->broadcast);
+ 	ha = (union fwnet_hwaddr *)net->dev_addr;
+ 	put_unaligned_be64(card->guid, &ha->uc.uniq_id);
+ 	ha->uc.max_rec = dev->card->max_receive;
+ 	ha->uc.sspd = dev->card->link_speed;
+ 	put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
+ 	put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+ 
+ 	memset(net->broadcast, -1, net->addr_len);
+ 
  	ret = register_netdev(net);
  	if (ret)
  		goto out;
  
  	list_add_tail(&dev->dev_link, &fwnet_device_list);
- 	dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n",
+ 	dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
  		   dev_name(card->device));
   have_dev:
  	ret = fwnet_add_peer(dev, unit, device);
  	if (ret && allocated_netdev) {
  		unregister_netdev(net);
  		list_del(&dev->dev_link);
- 	}
   out:
- 	if (ret && allocated_netdev)
+ 		fwnet_fifo_stop(dev);
  		free_netdev(net);
+ 	}
  
  	mutex_unlock(&fwnet_device_mutex);
  
@@@ -1587,22 -1557,14 +1552,14 @@@ static int fwnet_remove(struct device *
  	mutex_lock(&fwnet_device_mutex);
  
  	net = dev->netdev;
- 	if (net && peer->ip)
- 		arp_invalidate(net, peer->ip);
  
  	fwnet_remove_peer(peer, dev);
  
  	if (list_empty(&dev->peer_list)) {
  		unregister_netdev(net);
  
- 		if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
- 			fw_core_remove_address_handler(&dev->handler);
- 		if (dev->broadcast_rcv_context) {
- 			fw_iso_context_stop(dev->broadcast_rcv_context);
- 			fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
- 					      dev->card);
- 			fw_iso_context_destroy(dev->broadcast_rcv_context);
- 		}
+ 		fwnet_fifo_stop(dev);
+ 
  		for (i = 0; dev->queued_datagrams && i < 5; i++)
  			ssleep(1);
  		WARN_ON(dev->queued_datagrams);
@@@ -1641,6 -1603,14 +1598,14 @@@ static const struct ieee1394_device_id 
  		.specifier_id = IANA_SPECIFIER_ID,
  		.version      = RFC2734_SW_VERSION,
  	},
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	{
+ 		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
+ 				IEEE1394_MATCH_VERSION,
+ 		.specifier_id = IANA_SPECIFIER_ID,
+ 		.version      = RFC3146_SW_VERSION,
+ 	},
+ #endif
  	{ }
  };
  
@@@ -1678,6 -1648,30 +1643,30 @@@ static struct fw_descriptor rfc2374_uni
  	.data   = rfc2374_unit_directory_data
  };
  
+ #if IS_ENABLED(CONFIG_IPV6)
+ static const u32 rfc3146_unit_directory_data[] = {
+ 	0x00040000,	/* directory_length		*/
+ 	0x1200005e,	/* unit_specifier_id: IANA	*/
+ 	0x81000003,	/* textual descriptor offset	*/
+ 	0x13000002,	/* unit_sw_version: RFC 3146	*/
+ 	0x81000005,	/* textual descriptor offset	*/
+ 	0x00030000,	/* descriptor_length		*/
+ 	0x00000000,	/* text				*/
+ 	0x00000000,	/* minimal ASCII, en		*/
+ 	0x49414e41,	/* I A N A			*/
+ 	0x00030000,	/* descriptor_length		*/
+ 	0x00000000,	/* text				*/
+ 	0x00000000,	/* minimal ASCII, en		*/
+ 	0x49507636,	/* I P v 6			*/
+ };
+ 
+ static struct fw_descriptor rfc3146_unit_directory = {
+ 	.length = ARRAY_SIZE(rfc3146_unit_directory_data),
+ 	.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ 	.data   = rfc3146_unit_directory_data
+ };
+ #endif
+ 
  static int __init fwnet_init(void)
  {
  	int err;
@@@ -1686,11 -1680,17 +1675,17 @@@
  	if (err)
  		return err;
  
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	err = fw_core_add_descriptor(&rfc3146_unit_directory);
+ 	if (err)
+ 		goto out;
+ #endif
+ 
  	fwnet_packet_task_cache = kmem_cache_create("packet_task",
  			sizeof(struct fwnet_packet_task), 0, 0, NULL);
  	if (!fwnet_packet_task_cache) {
  		err = -ENOMEM;
- 		goto out;
+ 		goto out2;
  	}
  
  	err = driver_register(&fwnet_driver.driver);
@@@ -1698,7 -1698,11 +1693,11 @@@
  		return 0;
  
  	kmem_cache_destroy(fwnet_packet_task_cache);
+ out2:
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	fw_core_remove_descriptor(&rfc3146_unit_directory);
  out:
+ #endif
  	fw_core_remove_descriptor(&rfc2374_unit_directory);
  
  	return err;
@@@ -1709,11 -1713,14 +1708,14 @@@ static void __exit fwnet_cleanup(void
  {
  	driver_unregister(&fwnet_driver.driver);
  	kmem_cache_destroy(fwnet_packet_task_cache);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	fw_core_remove_descriptor(&rfc3146_unit_directory);
+ #endif
  	fw_core_remove_descriptor(&rfc2374_unit_directory);
  }
  module_exit(fwnet_cleanup);
  
  MODULE_AUTHOR("Jay Fenlason <fenlason at redhat.com>");
- MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+ MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
  MODULE_LICENSE("GPL");
  MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --combined drivers/infiniband/hw/cxgb4/qp.c
index ed49ab3,5b059e2..2320404
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@@ -42,10 -42,21 +42,21 @@@ static int ocqp_support = 1
  module_param(ocqp_support, int, 0644);
  MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
  
- int db_fc_threshold = 2000;
+ int db_fc_threshold = 1000;
  module_param(db_fc_threshold, int, 0644);
- MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
- 		 "db flow control mode (default = 2000)");
+ MODULE_PARM_DESC(db_fc_threshold,
+ 		 "QP count/threshold that triggers"
+ 		 " automatic db flow control mode (default = 1000)");
+ 
+ int db_coalescing_threshold;
+ module_param(db_coalescing_threshold, int, 0644);
+ MODULE_PARM_DESC(db_coalescing_threshold,
+ 		 "QP count/threshold that triggers"
+ 		 " disabling db coalescing (default = 0)");
+ 
+ static int max_fr_immd = T4_MAX_FR_IMMD;
+ module_param(max_fr_immd, int, 0644);
+ MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
  
  static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
  {
@@@ -76,7 -87,7 +87,7 @@@ static void dealloc_sq(struct c4iw_rde
  
  static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
  {
- 	if (!ocqp_support || !t4_ocqp_supported())
+ 	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
  		return -ENOSYS;
  	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
  	if (!sq->dma_addr)
@@@ -100,16 -111,6 +111,16 @@@ static int alloc_host_sq(struct c4iw_rd
  	return 0;
  }
  
 +static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
 +{
 +	int ret = -ENOSYS;
 +	if (user)
 +		ret = alloc_oc_sq(rdev, sq);
 +	if (ret)
 +		ret = alloc_host_sq(rdev, sq);
 +	return ret;
 +}
 +
  static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
  		      struct c4iw_dev_ucontext *uctx)
  {
@@@ -139,7 -140,7 +150,7 @@@ static int create_qp(struct c4iw_rdev *
  	int wr_len;
  	struct c4iw_wr_wait wr_wait;
  	struct sk_buff *skb;
- 	int ret;
+ 	int ret = 0;
  	int eqsize;
  
  	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@@ -178,9 -179,15 +189,9 @@@
  		goto free_sw_rq;
  	}
  
 -	if (user) {
 -		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
 -			goto free_hwaddr;
 -	} else {
 -		ret = alloc_host_sq(rdev, &wq->sq);
 -		if (ret)
 -			goto free_hwaddr;
 -	}
 -
 +	ret = alloc_sq(rdev, &wq->sq, user);
 +	if (ret)
 +		goto free_hwaddr;
  	memset(wq->sq.queue, 0, wq->sq.memsize);
  	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
  
@@@ -535,7 -542,7 +546,7 @@@ static int build_rdma_recv(struct c4iw_
  }
  
  static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
- 			 struct ib_send_wr *wr, u8 *len16)
+ 			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
  {
  
  	struct fw_ri_immd *imdp;
@@@ -557,28 -564,51 +568,51 @@@
  	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
  	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
  					0xffffffff);
- 	WARN_ON(pbllen > T4_MAX_FR_IMMD);
- 	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- 	imdp->op = FW_RI_DATA_IMMD;
- 	imdp->r1 = 0;
- 	imdp->r2 = 0;
- 	imdp->immdlen = cpu_to_be32(pbllen);
- 	p = (__be64 *)(imdp + 1);
- 	rem = pbllen;
- 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- 		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
- 		rem -= sizeof *p;
- 		if (++p == (__be64 *)&sq->queue[sq->size])
- 			p = (__be64 *)sq->queue;
- 	}
- 	BUG_ON(rem < 0);
- 	while (rem) {
- 		*p = 0;
- 		rem -= sizeof *p;
- 		if (++p == (__be64 *)&sq->queue[sq->size])
- 			p = (__be64 *)sq->queue;
+ 
+ 	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ 		struct c4iw_fr_page_list *c4pl =
+ 			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ 		struct fw_ri_dsgl *sglp;
+ 
+ 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ 			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+ 				cpu_to_be64((u64)
+ 				wr->wr.fast_reg.page_list->page_list[i]);
+ 		}
+ 
+ 		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ 		sglp->op = FW_RI_DATA_DSGL;
+ 		sglp->r1 = 0;
+ 		sglp->nsge = cpu_to_be16(1);
+ 		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ 		sglp->len0 = cpu_to_be32(pbllen);
+ 
+ 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+ 	} else {
+ 		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ 		imdp->op = FW_RI_DATA_IMMD;
+ 		imdp->r1 = 0;
+ 		imdp->r2 = 0;
+ 		imdp->immdlen = cpu_to_be32(pbllen);
+ 		p = (__be64 *)(imdp + 1);
+ 		rem = pbllen;
+ 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ 			*p = cpu_to_be64(
+ 				(u64)wr->wr.fast_reg.page_list->page_list[i]);
+ 			rem -= sizeof(*p);
+ 			if (++p == (__be64 *)&sq->queue[sq->size])
+ 				p = (__be64 *)sq->queue;
+ 		}
+ 		BUG_ON(rem < 0);
+ 		while (rem) {
+ 			*p = 0;
+ 			rem -= sizeof(*p);
+ 			if (++p == (__be64 *)&sq->queue[sq->size])
+ 				p = (__be64 *)sq->queue;
+ 		}
+ 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+ 				      + pbllen, 16);
  	}
- 	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
  	return 0;
  }
  
@@@ -679,7 -709,10 +713,10 @@@ int c4iw_post_send(struct ib_qp *ibqp, 
  		case IB_WR_FAST_REG_MR:
  			fw_opcode = FW_RI_FR_NSMR_WR;
  			swsqe->opcode = FW_RI_FAST_REGISTER;
- 			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+ 			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+ 					    is_t5(
+ 					    qhp->rhp->rdev.lldi.adapter_type) ?
+ 					    1 : 0);
  			break;
  		case IB_WR_LOCAL_INV:
  			if (wr->send_flags & IB_SEND_FENCE)
@@@ -1451,6 -1484,9 +1488,9 @@@ int c4iw_destroy_qp(struct ib_qp *ib_qp
  		rhp->db_state = NORMAL;
  		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
  	}
+ 	if (db_coalescing_threshold >= 0)
+ 		if (rhp->qpcnt <= db_coalescing_threshold)
+ 			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
  	spin_unlock_irq(&rhp->lock);
  	atomic_dec(&qhp->refcnt);
  	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@@ -1562,11 -1598,15 +1602,15 @@@ struct ib_qp *c4iw_create_qp(struct ib_
  	spin_lock_irq(&rhp->lock);
  	if (rhp->db_state != NORMAL)
  		t4_disable_wq_db(&qhp->wq);
- 	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+ 	rhp->qpcnt++;
+ 	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
  		rhp->rdev.stats.db_state_transitions++;
  		rhp->db_state = FLOW_CONTROL;
  		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
  	}
+ 	if (db_coalescing_threshold >= 0)
+ 		if (rhp->qpcnt > db_coalescing_threshold)
+ 			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
  	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
  	spin_unlock_irq(&rhp->lock);
  	if (ret)
diff --combined drivers/infiniband/hw/mlx4/cq.c
index dab4b51,73b3a71..d5e60f4
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@@ -33,7 -33,6 +33,7 @@@
  
  #include <linux/mlx4/cq.h>
  #include <linux/mlx4/qp.h>
 +#include <linux/mlx4/srq.h>
  #include <linux/slab.h>
  
  #include "mlx4_ib.h"
@@@ -229,7 -228,7 +229,7 @@@ struct ib_cq *mlx4_ib_create_cq(struct 
  		vector = dev->eq_table[vector % ibdev->num_comp_vectors];
  
  	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- 			    cq->db.dma, &cq->mcq, vector, 0);
+ 			    cq->db.dma, &cq->mcq, vector, 0, 0);
  	if (err)
  		goto err_dbmap;
  
@@@ -586,7 -585,6 +586,7 @@@ static int mlx4_ib_poll_one(struct mlx4
  	struct mlx4_qp *mqp;
  	struct mlx4_ib_wq *wq;
  	struct mlx4_ib_srq *srq;
 +	struct mlx4_srq *msrq = NULL;
  	int is_send;
  	int is_error;
  	u32 g_mlpath_rqpn;
@@@ -655,20 -653,6 +655,20 @@@ repoll
  
  	wc->qp = &(*cur_qp)->ibqp;
  
 +	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
 +		u32 srq_num;
 +		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 +		srq_num       = g_mlpath_rqpn & 0xffffff;
 +		/* SRQ is also in the radix tree */
 +		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
 +				       srq_num);
 +		if (unlikely(!msrq)) {
 +			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
 +				cq->mcq.cqn, srq_num);
 +			return -EINVAL;
 +		}
 +	}
 +
  	if (is_send) {
  		wq = &(*cur_qp)->sq;
  		if (!(*cur_qp)->sq_signal_bits) {
@@@ -682,11 -666,6 +682,11 @@@
  		wqe_ctr = be16_to_cpu(cqe->wqe_index);
  		wc->wr_id = srq->wrid[wqe_ctr];
  		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 +	} else if (msrq) {
 +		srq = to_mibsrq(msrq);
 +		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 +		wc->wr_id = srq->wrid[wqe_ctr];
 +		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
  	} else {
  		wq	  = &(*cur_qp)->rq;
  		tail	  = wq->tail & (wq->wqe_cnt - 1);
diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c
index 31dd2a7,554b906..b6e049a
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@@ -730,7 -730,8 +730,8 @@@ static int ipoib_start_xmit(struct sk_b
  		if ((header->proto != htons(ETH_P_IP)) &&
  		    (header->proto != htons(ETH_P_IPV6)) &&
  		    (header->proto != htons(ETH_P_ARP)) &&
- 		    (header->proto != htons(ETH_P_RARP))) {
+ 		    (header->proto != htons(ETH_P_RARP)) &&
+ 		    (header->proto != htons(ETH_P_TIPC))) {
  			/* ethertype not supported by IPoIB */
  			++dev->stats.tx_dropped;
  			dev_kfree_skb_any(skb);
@@@ -751,6 -752,7 +752,7 @@@
  	switch (header->proto) {
  	case htons(ETH_P_IP):
  	case htons(ETH_P_IPV6):
+ 	case htons(ETH_P_TIPC):
  		neigh = ipoib_neigh_get(dev, cb->hwaddr);
  		if (unlikely(!neigh)) {
  			neigh_add_path(skb, cb->hwaddr, dev);
@@@ -828,7 -830,7 +830,7 @@@ static int ipoib_hard_header(struct sk_
  	 */
  	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
  
 -	return 0;
 +	return sizeof *header;
  }
  
  static void ipoib_set_mcast_list(struct net_device *dev)
diff --combined drivers/media/dvb-core/dvb_net.c
index aa17bdb,83a23af..f91c80c
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@@ -185,7 -185,7 +185,7 @@@ static __be16 dvb_net_eth_type_trans(st
  			skb->pkt_type=PACKET_MULTICAST;
  	}
  
- 	if (ntohs(eth->h_proto) >= 1536)
+ 	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
  		return eth->h_proto;
  
  	rawp = skb->data;
@@@ -228,9 -228,9 +228,9 @@@ static int ule_test_sndu( struct dvb_ne
  static int ule_bridged_sndu( struct dvb_net_priv *p )
  {
  	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
- 	if(ntohs(hdr->h_proto) < 1536) {
+ 	if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
  		int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
- 		/* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */
+ 		/* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
  		if(framelen != ntohs(hdr->h_proto)) {
  			return -1;
  		}
@@@ -320,7 -320,7 +320,7 @@@ static int handle_ule_extensions( struc
  			(int) p->ule_sndu_type, l, total_ext_len);
  #endif
  
- 	} while (p->ule_sndu_type < 1536);
+ 	} while (p->ule_sndu_type < ETH_P_802_3_MIN);
  
  	return total_ext_len;
  }
@@@ -712,7 -712,7 +712,7 @@@ static void dvb_net_ule( struct net_dev
  				}
  
  				/* Handle ULE Extension Headers. */
- 				if (priv->ule_sndu_type < 1536) {
+ 				if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
  					/* There is an extension header.  Handle it accordingly. */
  					int l = handle_ule_extensions(priv);
  					if (l < 0) {
@@@ -1044,7 -1044,7 +1044,7 @@@ static int dvb_net_feed_start(struct ne
  		ret = priv->tsfeed->set(priv->tsfeed,
  					priv->pid, /* pid */
  					TS_PACKET, /* type */
 -					DMX_TS_PES_OTHER, /* pes type */
 +					DMX_PES_OTHER, /* pes type */
  					32768,     /* circular buffer size */
  					timeout    /* timeout */
  					);
@@@ -1479,8 -1479,11 +1479,8 @@@ static int dvb_net_close(struct inode *
  
  	dvb_generic_release(inode, file);
  
 -	if(dvbdev->users == 1 && dvbnet->exit == 1) {
 -		fops_put(file->f_op);
 -		file->f_op = NULL;
 +	if(dvbdev->users == 1 && dvbnet->exit == 1)
  		wake_up(&dvbdev->wait_queue);
 -	}
  	return 0;
  }
  
diff --combined drivers/net/bonding/bond_main.c
index 7db40de1,532153d..d0aade0
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@@ -428,14 -428,15 +428,15 @@@ int bond_dev_queue_xmit(struct bonding 
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being added
   */
- static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
+ 				__be16 proto, u16 vid)
  {
  	struct bonding *bond = netdev_priv(bond_dev);
  	struct slave *slave, *stop_at;
  	int i, res;
  
  	bond_for_each_slave(bond, slave, i) {
- 		res = vlan_vid_add(slave->dev, vid);
+ 		res = vlan_vid_add(slave->dev, proto, vid);
  		if (res)
  			goto unwind;
  	}
@@@ -453,7 -454,7 +454,7 @@@ unwind
  	/* unwind from head to the slave that failed */
  	stop_at = slave;
  	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
- 		vlan_vid_del(slave->dev, vid);
+ 		vlan_vid_del(slave->dev, proto, vid);
  
  	return res;
  }
@@@ -463,14 -464,15 +464,15 @@@
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being removed
   */
- static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
+ 				 __be16 proto, u16 vid)
  {
  	struct bonding *bond = netdev_priv(bond_dev);
  	struct slave *slave;
  	int i, res;
  
  	bond_for_each_slave(bond, slave, i)
- 		vlan_vid_del(slave->dev, vid);
+ 		vlan_vid_del(slave->dev, proto, vid);
  
  	res = bond_del_vlan(bond, vid);
  	if (res) {
@@@ -488,7 -490,8 +490,8 @@@ static void bond_add_vlans_on_slave(str
  	int res;
  
  	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- 		res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ 		res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
+ 				   vlan->vlan_id);
  		if (res)
  			pr_warning("%s: Failed to add vlan id %d to device %s\n",
  				   bond->dev->name, vlan->vlan_id,
@@@ -504,7 -507,7 +507,7 @@@ static void bond_del_vlans_from_slave(s
  	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
  		if (!vlan->vlan_id)
  			continue;
- 		vlan_vid_del(slave_dev, vlan->vlan_id);
+ 		vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
  	}
  }
  
@@@ -779,7 -782,7 +782,7 @@@ static void bond_resend_igmp_join_reque
  
  	/* rejoin all groups on vlan devices */
  	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- 		vlan_dev = __vlan_find_dev_deep(bond_dev,
+ 		vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
  						vlan->vlan_id);
  		if (vlan_dev)
  			__bond_resend_igmp_join_requests(vlan_dev);
@@@ -796,9 -799,8 +799,8 @@@ static void bond_resend_igmp_join_reque
  {
  	struct bonding *bond = container_of(work, struct bonding,
  					    mcast_work.work);
- 	rcu_read_lock();
+ 
  	bond_resend_igmp_join_requests(bond);
- 	rcu_read_unlock();
  }
  
  /*
@@@ -1915,16 -1917,14 +1917,16 @@@ err_detach
  	bond_detach_slave(bond, new_slave);
  	if (bond->primary_slave == new_slave)
  		bond->primary_slave = NULL;
 -	write_unlock_bh(&bond->lock);
  	if (bond->curr_active_slave == new_slave) {
 +		bond_change_active_slave(bond, NULL);
 +		write_unlock_bh(&bond->lock);
  		read_lock(&bond->lock);
  		write_lock_bh(&bond->curr_slave_lock);
  		bond_select_active_slave(bond);
  		write_unlock_bh(&bond->curr_slave_lock);
  		read_unlock(&bond->lock);
 +	} else {
 +		write_unlock_bh(&bond->lock);
  	}
  	slave_disable_netpoll(new_slave);
  
@@@ -2534,7 -2534,8 +2536,8 @@@ static int bond_has_this_ip(struct bond
  
  	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
  		rcu_read_lock();
- 		vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
+ 		vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
+ 						vlan->vlan_id);
  		rcu_read_unlock();
  		if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
  			return 1;
@@@ -2563,7 -2564,7 +2566,7 @@@ static void bond_arp_send(struct net_de
  		return;
  	}
  	if (vlan_id) {
- 		skb = vlan_put_tag(skb, vlan_id);
+ 		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
  		if (!skb) {
  			pr_err("failed to insert VLAN tag\n");
  			return;
@@@ -2625,6 -2626,7 +2628,7 @@@ static void bond_arp_send_all(struct bo
  		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
  			rcu_read_lock();
  			vlan_dev = __vlan_find_dev_deep(bond->dev,
+ 							htons(ETH_P_8021Q),
  							vlan->vlan_id);
  			rcu_read_unlock();
  			if (vlan_dev == rt->dst.dev) {
@@@ -4260,6 -4262,37 +4264,37 @@@ void bond_set_mode_ops(struct bonding *
  	}
  }
  
+ static int bond_ethtool_get_settings(struct net_device *bond_dev,
+ 				     struct ethtool_cmd *ecmd)
+ {
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 	struct slave *slave;
+ 	int i;
+ 	unsigned long speed = 0;
+ 
+ 	ecmd->duplex = DUPLEX_UNKNOWN;
+ 	ecmd->port = PORT_OTHER;
+ 
+ 	/* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
+ 	 * do not need to check mode.  Though link speed might not represent
+ 	 * the true receive or transmit bandwidth (not all modes are symmetric)
+ 	 * this is an accurate maximum.
+ 	 */
+ 	read_lock(&bond->lock);
+ 	bond_for_each_slave(bond, slave, i) {
+ 		if (SLAVE_IS_OK(slave)) {
+ 			if (slave->speed != SPEED_UNKNOWN)
+ 				speed += slave->speed;
+ 			if (ecmd->duplex == DUPLEX_UNKNOWN &&
+ 			    slave->duplex != DUPLEX_UNKNOWN)
+ 				ecmd->duplex = slave->duplex;
+ 		}
+ 	}
+ 	ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
+ 	read_unlock(&bond->lock);
+ 	return 0;
+ }
+ 
  static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
  				     struct ethtool_drvinfo *drvinfo)
  {
@@@ -4271,6 -4304,7 +4306,7 @@@
  
  static const struct ethtool_ops bond_ethtool_ops = {
  	.get_drvinfo		= bond_ethtool_get_drvinfo,
+ 	.get_settings		= bond_ethtool_get_settings,
  	.get_link		= ethtool_op_get_link,
  };
  
@@@ -4361,9 -4395,9 +4397,9 @@@ static void bond_setup(struct net_devic
  	 */
  
  	bond_dev->hw_features = BOND_VLAN_FEATURES |
- 				NETIF_F_HW_VLAN_TX |
- 				NETIF_F_HW_VLAN_RX |
- 				NETIF_F_HW_VLAN_FILTER;
+ 				NETIF_F_HW_VLAN_CTAG_TX |
+ 				NETIF_F_HW_VLAN_CTAG_RX |
+ 				NETIF_F_HW_VLAN_CTAG_FILTER;
  
  	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
  	bond_dev->features |= bond_dev->hw_features;
diff --combined drivers/net/caif/caif_serial.c
index be90deb,e56b56c..77be3cb
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * Copyright (C) ST-Ericsson AB 2010
-  * Author:	Sjur Brendeland / sjur.brandeland at stericsson.com
+  * Author:	Sjur Brendeland
   * License terms: GNU General Public License (GPL) version 2
   */
  
@@@ -21,7 -21,7 +21,7 @@@
  #include <linux/debugfs.h>
  
  MODULE_LICENSE("GPL");
- MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland at stericsson.com>");
+ MODULE_AUTHOR("Sjur Brendeland");
  MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
  MODULE_LICENSE("GPL");
  MODULE_ALIAS_LDISC(N_CAIF);
@@@ -88,9 -88,11 +88,9 @@@ static inline void update_tty_status(st
  {
  	ser->tty_status =
  		ser->tty->stopped << 5 |
 -		ser->tty->hw_stopped << 4 |
  		ser->tty->flow_stopped << 3 |
  		ser->tty->packet << 2 |
 -		ser->tty->port->low_latency << 1 |
 -		ser->tty->warned;
 +		ser->tty->port->low_latency << 1;
  }
  static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
  {
diff --combined drivers/net/caif/caif_spi.c
index ae7e756,2fb279a..155db68
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@@ -1,7 -1,6 +1,6 @@@
  /*
   * Copyright (C) ST-Ericsson AB 2010
-  * Contact: Sjur Brendeland / sjur.brandeland at stericsson.com
-  * Author:  Daniel Martensson / Daniel.Martensson at stericsson.com
+  * Author:  Daniel Martensson
   * License terms: GNU General Public License (GPL) version 2.
   */
  
@@@ -29,7 -28,7 +28,7 @@@
  #endif /* CONFIG_CAIF_SPI_SYNC */
  
  MODULE_LICENSE("GPL");
- MODULE_AUTHOR("Daniel Martensson<daniel.martensson at stericsson.com>");
+ MODULE_AUTHOR("Daniel Martensson");
  MODULE_DESCRIPTION("CAIF SPI driver");
  
  /* Returns the number of padding bytes for alignment. */
@@@ -864,7 -863,6 +863,7 @@@ static int __init cfspi_init_module(voi
  	driver_remove_file(&cfspi_spi_driver.driver,
  			   &driver_attr_up_head_align);
   err_create_up_head_align:
 +	platform_driver_unregister(&cfspi_spi_driver);
   err_dev_register:
  	return result;
  }
diff --combined drivers/net/can/sja1000/ems_pcmcia.c
index 321c27e,a3aa681..9e535f2
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@@ -126,11 -126,11 +126,11 @@@ static irqreturn_t ems_pcmcia_interrupt
  static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv)
  {
  	/* Make sure SJA1000 is in reset mode */
- 	ems_pcmcia_write_reg(priv, REG_MOD, 1);
- 	ems_pcmcia_write_reg(priv, REG_CDR, CDR_PELICAN);
+ 	ems_pcmcia_write_reg(priv, SJA1000_MOD, 1);
+ 	ems_pcmcia_write_reg(priv, SJA1000_CDR, CDR_PELICAN);
  
  	/* read reset-values */
- 	if (ems_pcmcia_read_reg(priv, REG_CDR) == CDR_PELICAN)
+ 	if (ems_pcmcia_read_reg(priv, SJA1000_CDR) == CDR_PELICAN)
  		return 1;
  
  	return 0;
@@@ -316,4 -316,15 +316,4 @@@ static struct pcmcia_driver ems_pcmcia_
  	.remove = ems_pcmcia_remove,
  	.id_table = ems_pcmcia_tbl,
  };
 -
 -static int __init ems_pcmcia_init(void)
 -{
 -	return pcmcia_register_driver(&ems_pcmcia_driver);
 -}
 -module_init(ems_pcmcia_init);
 -
 -static void __exit ems_pcmcia_exit(void)
 -{
 -	pcmcia_unregister_driver(&ems_pcmcia_driver);
 -}
 -module_exit(ems_pcmcia_exit);
 +module_pcmcia_driver(ems_pcmcia_driver);
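
module_pcmcia_driver() is a helper macro in include/pcmcia/ds.h that generates the same register/unregister boilerplate the hunk above deletes. The sketch below is only an approximate expansion (the real macro is built on module_driver()), shown to make clear what the one-liner replaces.

/* approximate expansion only -- see include/pcmcia/ds.h for the real macro */
#define module_pcmcia_driver_sketch(__pcmcia_driver)		\
static int __init __pcmcia_driver##_init(void)			\
{								\
	return pcmcia_register_driver(&(__pcmcia_driver));	\
}								\
module_init(__pcmcia_driver##_init);				\
static void __exit __pcmcia_driver##_exit(void)			\
{								\
	pcmcia_unregister_driver(&(__pcmcia_driver));		\
}								\
module_exit(__pcmcia_driver##_exit)
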
diff --combined drivers/net/can/sja1000/peak_pcmcia.c
index 0a707f7,977901a..f7ad754
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@@ -196,7 -196,7 +196,7 @@@ static void pcan_write_canreg(const str
  	int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE;
  
  	/* sja1000 register changes control the leds state */
- 	if (port == REG_MOD)
+ 	if (port == SJA1000_MOD)
  		switch (v) {
  		case MOD_RM:
  			/* Reset Mode: set led on */
@@@ -509,11 -509,11 +509,11 @@@ static void pcan_free_channels(struct p
  static inline int pcan_channel_present(struct sja1000_priv *priv)
  {
  	/* make sure SJA1000 is in reset mode */
- 	pcan_write_canreg(priv, REG_MOD, 1);
- 	pcan_write_canreg(priv, REG_CDR, CDR_PELICAN);
+ 	pcan_write_canreg(priv, SJA1000_MOD, 1);
+ 	pcan_write_canreg(priv, SJA1000_CDR, CDR_PELICAN);
  
  	/* read reset-values */
- 	if (pcan_read_canreg(priv, REG_CDR) == CDR_PELICAN)
+ 	if (pcan_read_canreg(priv, SJA1000_CDR) == CDR_PELICAN)
  		return 1;
  
  	return 0;
@@@ -740,4 -740,15 +740,4 @@@ static struct pcmcia_driver pcan_drive
  	.remove = pcan_remove,
  	.id_table = pcan_table,
  };
 -
 -static int __init pcan_init(void)
 -{
 -	return pcmcia_register_driver(&pcan_driver);
 -}
 -module_init(pcan_init);
 -
 -static void __exit pcan_exit(void)
 -{
 -	pcmcia_unregister_driver(&pcan_driver);
 -}
 -module_exit(pcan_exit);
 +module_pcmcia_driver(pcan_driver);
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 51a6030,466b512..b8fbe26
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@@ -451,7 -451,8 +451,8 @@@ static void bnx2x_tpa_start(struct bnx2
   * Compute number of aggregated segments, and gso_type.
   */
  static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- 				 u16 len_on_bd, unsigned int pkt_len)
+ 				 u16 len_on_bd, unsigned int pkt_len,
+ 				 u16 num_of_coalesced_segs)
  {
  	/* TPA aggregation won't have either IP options or TCP options
  	 * other than timestamp or IPv6 extension headers.
@@@ -480,8 -481,7 +481,7 @@@
  	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
  	 * to skb_shinfo(skb)->gso_segs
  	 */
- 	NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- 					       skb_shinfo(skb)->gso_size);
+ 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
  }
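
For reference, the replaced line approximated the coalesced segment count from the aggregated packet length, header length and MSS; with the firmware now reporting num_of_coalesced_segs directly, the driver no longer needs that arithmetic. A standalone sketch of the old DIV_ROUND_UP computation, with illustrative lengths:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative values: 9014-byte aggregated frame, 66-byte headers,
	 * 1448-byte MSS -> 7 coalesced segments */
	unsigned int pkt_len = 9014, hdrs_len = 66, gso_size = 1448;

	printf("segments: %u\n", DIV_ROUND_UP(pkt_len - hdrs_len, gso_size));
	return 0;
}
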
  
  static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@@ -537,7 -537,8 +537,8 @@@ static int bnx2x_fill_frag_skb(struct b
  	/* This is needed in order to enable forwarding support */
  	if (frag_size)
  		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- 				     le16_to_cpu(cqe->pkt_len));
+ 				     le16_to_cpu(cqe->pkt_len),
+ 				     le16_to_cpu(cqe->num_of_coalesced_segs));
  
  #ifdef BNX2X_STOP_ON_ERROR
  	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@@ -641,6 -642,14 +642,14 @@@ static void bnx2x_gro_ipv6_csum(struct 
  	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
  				  &iph->saddr, &iph->daddr, 0);
  }
+ 
+ static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+ 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
+ {
+ 	skb_set_network_header(skb, 0);
+ 	gro_func(bp, skb);
+ 	tcp_gro_complete(skb);
+ }
  #endif
  
  static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@@ -648,19 -657,17 +657,17 @@@
  {
  #ifdef CONFIG_INET
  	if (skb_shinfo(skb)->gso_size) {
- 		skb_set_network_header(skb, 0);
  		switch (be16_to_cpu(skb->protocol)) {
  		case ETH_P_IP:
- 			bnx2x_gro_ip_csum(bp, skb);
+ 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
  			break;
  		case ETH_P_IPV6:
- 			bnx2x_gro_ipv6_csum(bp, skb);
+ 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
  			break;
  		default:
- 			BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ 			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
  				  be16_to_cpu(skb->protocol));
  		}
- 		tcp_gro_complete(skb);
  	}
  #endif
  	napi_gro_receive(&fp->napi, skb);
@@@ -718,7 -725,7 +725,7 @@@ static void bnx2x_tpa_stop(struct bnx2
  		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
  					 skb, cqe, cqe_idx)) {
  			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
- 				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+ 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
  			bnx2x_gro_receive(bp, fp, skb);
  		} else {
  			DP(NETIF_MSG_RX_STATUS,
@@@ -993,7 -1000,7 +1000,7 @@@ reuse_rx
  
  		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
  		    PARSING_FLAGS_VLAN)
- 			__vlan_hwaccel_put_tag(skb,
+ 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  					       le16_to_cpu(cqe_fp->vlan_tag));
  		napi_gro_receive(&fp->napi, skb);
  
@@@ -1037,7 -1044,6 +1044,7 @@@ static irqreturn_t bnx2x_msix_fp_int(in
  	DP(NETIF_MSG_INTR,
  	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
  	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
 +
  	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
  
  #ifdef BNX2X_STOP_ON_ERROR
@@@ -1719,7 -1725,7 +1726,7 @@@ static int bnx2x_req_irq(struct bnx2x *
  	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
  }
  
 -static int bnx2x_setup_irqs(struct bnx2x *bp)
 +int bnx2x_setup_irqs(struct bnx2x *bp)
  {
  	int rc = 0;
  	if (bp->flags & USING_MSIX_FLAG &&
@@@ -2010,7 -2016,7 +2017,7 @@@ static int bnx2x_init_hw(struct bnx2x *
   * Cleans the objects that have internal lists without sending
   * ramrods. Should be run when interrupts are disabled.
   */
- static void bnx2x_squeeze_objects(struct bnx2x *bp)
+ void bnx2x_squeeze_objects(struct bnx2x *bp)
  {
  	int rc;
  	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@@ -2575,8 -2581,6 +2582,8 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
  		}
  	}
  
 +	bnx2x_pre_irq_nic_init(bp);
 +
  	/* Connect to IRQs */
  	rc = bnx2x_setup_irqs(bp);
  	if (rc) {
@@@ -2586,11 -2590,11 +2593,11 @@@
  		LOAD_ERROR_EXIT(bp, load_error2);
  	}
  
 -	/* Setup NIC internals and enable interrupts */
 -	bnx2x_nic_init(bp, load_code);
 -
  	/* Init per-function objects */
  	if (IS_PF(bp)) {
 +		/* Setup NIC internals and enable interrupts */
 +		bnx2x_post_irq_nic_init(bp, load_code);
 +
  		bnx2x_init_bp_objs(bp);
  		bnx2x_iov_nic_init(bp);
  
@@@ -2660,7 -2664,8 +2667,8 @@@
  	if (IS_PF(bp))
  		rc = bnx2x_set_eth_mac(bp, true);
  	else /* vf */
- 		rc = bnx2x_vfpf_set_mac(bp);
+ 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+ 					   true);
  	if (rc) {
  		BNX2X_ERR("Setting Ethernet MAC failed\n");
  		LOAD_ERROR_EXIT(bp, load_error3);
@@@ -2780,7 -2785,7 +2788,7 @@@ load_error0
  #endif /* ! BNX2X_STOP_ON_ERROR */
  }
  
- static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+ int bnx2x_drain_tx_queues(struct bnx2x *bp)
  {
  	u8 rc = 0, cos, i;
  
@@@ -2929,9 -2934,9 +2937,9 @@@ int bnx2x_nic_unload(struct bnx2x *bp, 
  		bnx2x_free_fp_mem_cnic(bp);
  
  	if (IS_PF(bp)) {
- 		bnx2x_free_mem(bp);
  		if (CNIC_LOADED(bp))
  			bnx2x_free_mem_cnic(bp);
+ 		bnx2x_free_mem(bp);
  	}
  	bp->state = BNX2X_STATE_CLOSED;
  	bp->cnic_loaded = false;
@@@ -3092,11 -3097,11 +3100,11 @@@ int bnx2x_poll(struct napi_struct *napi
   * to ease the pain of our fellow microcode engineers
   * we use one mapping for both BDs
   */
- static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- 				   struct bnx2x_fp_txdata *txdata,
- 				   struct sw_tx_bd *tx_buf,
- 				   struct eth_tx_start_bd **tx_bd, u16 hlen,
- 				   u16 bd_prod, int nbd)
+ static u16 bnx2x_tx_split(struct bnx2x *bp,
+ 			  struct bnx2x_fp_txdata *txdata,
+ 			  struct sw_tx_bd *tx_buf,
+ 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
+ 			  u16 bd_prod)
  {
  	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
  	struct eth_tx_bd *d_tx_bd;
@@@ -3104,11 -3109,10 +3112,10 @@@
  	int old_len = le16_to_cpu(h_tx_bd->nbytes);
  
  	/* first fix first BD */
- 	h_tx_bd->nbd = cpu_to_le16(nbd);
  	h_tx_bd->nbytes = cpu_to_le16(hlen);
  
- 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x) nbd %d\n",
- 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
+ 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
  
  	/* now get a new data BD
  	 * (after the pbd) and fill it */
@@@ -3137,7 -3141,7 +3144,7 @@@
  
  #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
  #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
- static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+ static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
  {
  	__sum16 tsum = (__force __sum16) csum;
  
@@@ -3152,30 -3156,47 +3159,47 @@@
  	return bswab16(tsum);
  }
  
- static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
  {
  	u32 rc;
+ 	__u8 prot = 0;
+ 	__be16 protocol;
  
  	if (skb->ip_summed != CHECKSUM_PARTIAL)
- 		rc = XMIT_PLAIN;
+ 		return XMIT_PLAIN;
  
- 	else {
- 		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- 			rc = XMIT_CSUM_V6;
- 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- 				rc |= XMIT_CSUM_TCP;
+ 	protocol = vlan_get_protocol(skb);
+ 	if (protocol == htons(ETH_P_IPV6)) {
+ 		rc = XMIT_CSUM_V6;
+ 		prot = ipv6_hdr(skb)->nexthdr;
+ 	} else {
+ 		rc = XMIT_CSUM_V4;
+ 		prot = ip_hdr(skb)->protocol;
+ 	}
  
+ 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ 		if (inner_ip_hdr(skb)->version == 6) {
+ 			rc |= XMIT_CSUM_ENC_V6;
+ 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ 				rc |= XMIT_CSUM_TCP;
  		} else {
- 			rc = XMIT_CSUM_V4;
- 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ 			rc |= XMIT_CSUM_ENC_V4;
+ 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
  				rc |= XMIT_CSUM_TCP;
  		}
  	}
+ 	if (prot == IPPROTO_TCP)
+ 		rc |= XMIT_CSUM_TCP;
  
- 	if (skb_is_gso_v6(skb))
- 		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- 	else if (skb_is_gso(skb))
- 		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ 	if (skb_is_gso_v6(skb)) {
+ 		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ 		if (rc & XMIT_CSUM_ENC)
+ 			rc |= XMIT_GSO_ENC_V6;
+ 	} else if (skb_is_gso(skb)) {
+ 		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ 		if (rc & XMIT_CSUM_ENC)
+ 			rc |= XMIT_GSO_ENC_V4;
+ 	}
  
  	return rc;
  }
@@@ -3260,14 -3281,23 +3284,23 @@@ exit_lbl
  }
  #endif
  
- static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- 					u32 xmit_type)
+ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ 				 u32 xmit_type)
  {
+ 	struct ipv6hdr *ipv6;
+ 
  	*parsing_data |= (skb_shinfo(skb)->gso_size <<
  			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
  			      ETH_TX_PARSE_BD_E2_LSO_MSS;
- 	if ((xmit_type & XMIT_GSO_V6) &&
- 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ 
+ 	if (xmit_type & XMIT_GSO_ENC_V6)
+ 		ipv6 = inner_ipv6_hdr(skb);
+ 	else if (xmit_type & XMIT_GSO_V6)
+ 		ipv6 = ipv6_hdr(skb);
+ 	else
+ 		ipv6 = NULL;
+ 
+ 	if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
  		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
  }
  
@@@ -3278,13 -3308,13 +3311,13 @@@
   * @pbd:	parse BD
   * @xmit_type:	xmit flags
   */
- static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- 				     struct eth_tx_parse_bd_e1x *pbd,
- 				     u32 xmit_type)
+ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ 			      struct eth_tx_parse_bd_e1x *pbd,
+ 			      u32 xmit_type)
  {
  	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
  	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- 	pbd->tcp_flags = pbd_tcp_flags(skb);
+ 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
  
  	if (xmit_type & XMIT_GSO_V4) {
  		pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@@ -3304,6 -3334,40 +3337,40 @@@
  }
  
  /**
+  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+  *
+  * @bp:			driver handle
+  * @skb:		packet skb
+  * @parsing_data:	data to be updated
+  * @xmit_type:		xmit flags
+  *
+  * 57712/578xx related, when skb has encapsulation
+  */
+ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ 				 u32 *parsing_data, u32 xmit_type)
+ {
+ 	*parsing_data |=
+ 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+ 
+ 	if (xmit_type & XMIT_CSUM_TCP) {
+ 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+ 
+ 		return skb_inner_transport_header(skb) +
+ 			inner_tcp_hdrlen(skb) - skb->data;
+ 	}
+ 
+ 	/* We support checksum offload for TCP and UDP only.
+ 	 * No need to pass the UDP header length - it's a constant.
+ 	 */
+ 	return skb_inner_transport_header(skb) +
+ 		sizeof(struct udphdr) - skb->data;
+ }
+ 
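Most of the lengths and offsets written into the parsing BDs here (the fields with a _W suffix, hlen_w, and the >> 1 shifts) are expressed in 16-bit words rather than bytes, and ihl, which is already in 32-bit words, is converted with << 1. A small standalone check of those unit conversions, assuming a plain 14-byte Ethernet header, a 20-byte IPv4 header and a 20-byte TCP header:

#include <stdio.h>

int main(void)
{
	/* assumed sizes: Ethernet 14 bytes, IPv4 20 bytes (ihl = 5), TCP 20 bytes */
	unsigned int l4_off_bytes = 14 + 20;	/* offset of the L4 header from skb->data */
	unsigned int ihl = 5;			/* IPv4 header length in 32-bit words */

	printf("L4 offset in 16-bit words:    %u\n", l4_off_bytes >> 1);	/* 17 */
	printf("IP header len in 16-bit words: %u\n", ihl << 1);		/* 10 */
	return 0;
}
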
+ /**
   * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
   *
   * @bp:			driver handle
@@@ -3311,15 -3375,15 +3378,15 @@@
   * @parsing_data:	data to be updated
   * @xmit_type:		xmit flags
   *
-  * 57712 related
+  * 57712/578xx related
   */
- static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- 					u32 *parsing_data, u32 xmit_type)
+ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ 				u32 *parsing_data, u32 xmit_type)
  {
  	*parsing_data |=
  		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- 		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- 		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
  
  	if (xmit_type & XMIT_CSUM_TCP) {
  		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@@ -3334,17 -3398,15 +3401,15 @@@
  	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
  }
  
- static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- 	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+ /* set FW indication according to inner or outer protocols if tunneled */
+ static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ 			       struct eth_tx_start_bd *tx_start_bd,
+ 			       u32 xmit_type)
  {
  	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
  
- 	if (xmit_type & XMIT_CSUM_V4)
- 		tx_start_bd->bd_flags.as_bitfield |=
- 					ETH_TX_BD_FLAGS_IP_CSUM;
- 	else
- 		tx_start_bd->bd_flags.as_bitfield |=
- 					ETH_TX_BD_FLAGS_IPV6;
+ 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
  
  	if (!(xmit_type & XMIT_CSUM_TCP))
  		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@@ -3358,9 -3420,9 +3423,9 @@@
   * @pbd:	parse BD to be updated
   * @xmit_type:	xmit flags
   */
- static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- 	struct eth_tx_parse_bd_e1x *pbd,
- 	u32 xmit_type)
+ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ 			     struct eth_tx_parse_bd_e1x *pbd,
+ 			     u32 xmit_type)
  {
  	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
  
@@@ -3406,6 -3468,75 +3471,75 @@@
  	return hlen;
  }
  
+ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ 				      struct eth_tx_parse_bd_e2 *pbd_e2,
+ 				      struct eth_tx_parse_2nd_bd *pbd2,
+ 				      u16 *global_data,
+ 				      u32 xmit_type)
+ {
+ 	u16 hlen_w = 0;
+ 	u8 outerip_off, outerip_len = 0;
+ 	/* from outer IP to transport */
+ 	hlen_w = (skb_inner_transport_header(skb) -
+ 		  skb_network_header(skb)) >> 1;
+ 
+ 	/* transport len */
+ 	if (xmit_type & XMIT_CSUM_TCP)
+ 		hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ 	else
+ 		hlen_w += sizeof(struct udphdr) >> 1;
+ 
+ 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+ 
+ 	if (xmit_type & XMIT_CSUM_ENC_V4) {
+ 		struct iphdr *iph = ip_hdr(skb);
+ 		pbd2->fw_ip_csum_wo_len_flags_frag =
+ 			bswab16(csum_fold((~iph->check) -
+ 					  iph->tot_len - iph->frag_off));
+ 	} else {
+ 		pbd2->fw_ip_hdr_to_payload_w =
+ 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ 	}
+ 
+ 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+ 
+ 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+ 
+ 	if (xmit_type & XMIT_GSO_V4) {
+ 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+ 
+ 		pbd_e2->data.tunnel_data.pseudo_csum =
+ 			bswab16(~csum_tcpudp_magic(
+ 					inner_ip_hdr(skb)->saddr,
+ 					inner_ip_hdr(skb)->daddr,
+ 					0, IPPROTO_TCP, 0));
+ 
+ 		outerip_len = ip_hdr(skb)->ihl << 1;
+ 	} else {
+ 		pbd_e2->data.tunnel_data.pseudo_csum =
+ 			bswab16(~csum_ipv6_magic(
+ 					&inner_ipv6_hdr(skb)->saddr,
+ 					&inner_ipv6_hdr(skb)->daddr,
+ 					0, IPPROTO_TCP, 0));
+ 	}
+ 
+ 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ 
+ 	*global_data |=
+ 		outerip_off |
+ 		(!!(xmit_type & XMIT_CSUM_V6) <<
+ 			ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ 		(outerip_len <<
+ 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+ 
+ 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+ 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+ 	}
+ }
+ 
  /* called with netif_tx_lock
   * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
   * netif_wake_queue()
@@@ -3421,6 -3552,7 +3555,7 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
  	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
  	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
  	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
  	u32 pbd_e2_parsing_data = 0;
  	u16 pkt_prod, bd_prod;
  	int nbd, txq_index;
@@@ -3488,7 -3620,7 +3623,7 @@@
  			mac_type = MULTICAST_ADDRESS;
  	}
  
- #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+ #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
  	/* First, check if we need to linearize the skb (due to FW
  	   restrictions). No need to check fragmentation if page size > 8K
  	   (there will be no violation to FW restrictions) */
@@@ -3536,12 -3668,9 +3671,9 @@@
  	first_bd = tx_start_bd;
  
  	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- 	SET_FLAG(tx_start_bd->general_data,
- 		 ETH_TX_START_BD_PARSE_NBDS,
- 		 0);
  
- 	/* header nbd */
- 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ 	/* header nbd: indirectly zero other flags! */
+ 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
  
  	/* remember the first BD of the packet */
  	tx_buf->first_bd = txdata->tx_bd_prod;
@@@ -3561,19 -3690,16 +3693,16 @@@
  		/* when transmitting in a vf, start bd must hold the ethertype
  		 * for fw to enforce it
  		 */
- #ifndef BNX2X_STOP_ON_ERROR
- 		if (IS_VF(bp)) {
- #endif
+ 		if (IS_VF(bp))
  			tx_start_bd->vlan_or_ethertype =
  				cpu_to_le16(ntohs(eth->h_proto));
- #ifndef BNX2X_STOP_ON_ERROR
- 		} else {
+ 		else
  			/* used by FW for packet accounting */
  			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- 		}
- #endif
  	}
  
+ 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+ 
  	/* turn on parsing and get a BD */
  	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  
@@@ -3583,23 -3709,58 +3712,58 @@@
  	if (!CHIP_IS_E1x(bp)) {
  		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
  		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- 		/* Set PBD in checksum offload case */
- 		if (xmit_type & XMIT_CSUM)
+ 
+ 		if (xmit_type & XMIT_CSUM_ENC) {
+ 			u16 global_data = 0;
+ 
+ 			/* Set PBD in enc checksum offload case */
+ 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ 						      &pbd_e2_parsing_data,
+ 						      xmit_type);
+ 
+ 			/* turn on 2nd parsing and get a BD */
+ 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ 
+ 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+ 
+ 			memset(pbd2, 0, sizeof(*pbd2));
+ 
+ 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ 				(skb_inner_network_header(skb) -
+ 				 skb->data) >> 1;
+ 
+ 			if (xmit_type & XMIT_GSO_ENC)
+ 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ 							  &global_data,
+ 							  xmit_type);
+ 
+ 			pbd2->global_data = cpu_to_le16(global_data);
+ 
+ 			/* add an additional parse BD indication to the start BD */
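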
+ 			SET_FLAG(tx_start_bd->general_data,
+ 				 ETH_TX_START_BD_PARSE_NBDS, 1);
+ 			/* set encapsulation flag in start BD */
+ 			SET_FLAG(tx_start_bd->general_data,
+ 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ 			nbd++;
+ 		} else if (xmit_type & XMIT_CSUM) {
+ 			/* Set PBD in checksum offload case w/o encapsulation */
  			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
  						     &pbd_e2_parsing_data,
  						     xmit_type);
+ 		}
  
- 		if (IS_MF_SI(bp) || IS_VF(bp)) {
- 			/* fill in the MAC addresses in the PBD - for local
- 			 * switching
- 			 */
- 			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- 					      &pbd_e2->src_mac_addr_mid,
- 					      &pbd_e2->src_mac_addr_lo,
+ 		/* Add the macs to the parsing BD - this is a vf */
+ 		if (IS_VF(bp)) {
+ 			/* override GRE parameters in BD */
+ 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ 					      &pbd_e2->data.mac_addr.src_mid,
+ 					      &pbd_e2->data.mac_addr.src_lo,
  					      eth->h_source);
- 			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- 					      &pbd_e2->dst_mac_addr_mid,
- 					      &pbd_e2->dst_mac_addr_lo,
+ 
+ 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ 					      &pbd_e2->data.mac_addr.dst_mid,
+ 					      &pbd_e2->data.mac_addr.dst_lo,
  					      eth->h_dest);
  		}
  
@@@ -3621,14 -3782,13 +3785,13 @@@
  	/* Setup the data pointer of the first BD of the packet */
  	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
  	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
  	pkt_size = tx_start_bd->nbytes;
  
  	DP(NETIF_MSG_TX_QUEUED,
- 	   "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
+ 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
  	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- 	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ 	   le16_to_cpu(tx_start_bd->nbytes),
  	   tx_start_bd->bd_flags.as_bitfield,
  	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
  
@@@ -3641,10 -3801,12 +3804,12 @@@
  
  		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
  
- 		if (unlikely(skb_headlen(skb) > hlen))
+ 		if (unlikely(skb_headlen(skb) > hlen)) {
+ 			nbd++;
  			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
  						 &tx_start_bd, hlen,
- 						 bd_prod, ++nbd);
+ 						 bd_prod);
+ 		}
  		if (!CHIP_IS_E1x(bp))
  			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
  					     xmit_type);
@@@ -3734,9 -3896,13 +3899,13 @@@
  	if (pbd_e2)
  		DP(NETIF_MSG_TX_QUEUED,
  		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
- 		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- 		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- 		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ 		   pbd_e2,
+ 		   pbd_e2->data.mac_addr.dst_hi,
+ 		   pbd_e2->data.mac_addr.dst_mid,
+ 		   pbd_e2->data.mac_addr.dst_lo,
+ 		   pbd_e2->data.mac_addr.src_hi,
+ 		   pbd_e2->data.mac_addr.src_mid,
+ 		   pbd_e2->data.mac_addr.src_lo,
  		   pbd_e2->parsing_data);
  	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c3a65d0,54e1b14..151675d
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@@ -50,13 -50,13 +50,13 @@@ extern int int_mode
  		} \
  	} while (0)
  
- #define BNX2X_PCI_ALLOC(x, y, size) \
- 	do { \
- 		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- 		if (x == NULL) \
- 			goto alloc_mem_err; \
- 		memset((void *)x, 0, size); \
- 	} while (0)
+ #define BNX2X_PCI_ALLOC(x, y, size)				\
+ do {								\
+ 	x = dma_alloc_coherent(&bp->pdev->dev, size, y,		\
+ 			       GFP_KERNEL | __GFP_ZERO);	\
+ 	if (x == NULL)						\
+ 		goto alloc_mem_err;				\
+ } while (0)
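
The explicit memset() after dma_alloc_coherent() is dropped in favour of asking the allocator for pre-zeroed memory via __GFP_ZERO. A hedged fragment showing the same pattern outside the macro; bp, size, mapping and the error label are placeholders, not driver API:

/* sketch only: request zeroed DMA-coherent memory up front */
void *buf = dma_alloc_coherent(&bp->pdev->dev, size, &mapping,
			       GFP_KERNEL | __GFP_ZERO);
if (!buf)
	goto alloc_mem_err;	/* no memset() needed -- memory arrives zeroed */
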
  
  #define BNX2X_ALLOC(x, size) \
  	do { \
@@@ -295,29 -295,16 +295,29 @@@ void bnx2x_int_disable_sync(struct bnx2
  void bnx2x_nic_init_cnic(struct bnx2x *bp);
  
  /**
 - * bnx2x_nic_init - init driver internals.
 + * bnx2x_preirq_nic_init - init driver internals.
   *
   * @bp:		driver handle
   *
   * Initializes:
 - *  - rings
 + *  - fastpath object
 + *  - fastpath rings
 + *  etc.
 + */
 +void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
 +
 +/**
 + * bnx2x_postirq_nic_init - init driver internals.
 + *
 + * @bp:		driver handle
 + * @load_code:	COMMON, PORT or FUNCTION
 + *
 + * Initializes:
   *  - status blocks
 + *  - slowpath rings
   *  - etc.
   */
 -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
 +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
  /**
   * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
   *
@@@ -509,7 -496,10 +509,10 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
  /* setup_tc callback */
  int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
  
+ int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ 			struct ifla_vf_info *ivi);
  int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
  
  /* select_queue callback */
  u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@@ -847,7 -837,7 +850,7 @@@ static inline void bnx2x_add_all_napi_c
  	/* Add NAPI objects */
  	for_each_rx_queue_cnic(bp, i)
  		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ 			       bnx2x_poll, NAPI_POLL_WEIGHT);
  }
  
  static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@@ -857,7 -847,7 +860,7 @@@
  	/* Add NAPI objects */
  	for_each_eth_queue(bp, i)
  		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ 			       bnx2x_poll, NAPI_POLL_WEIGHT);
  }
  
  static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@@ -983,6 -973,9 +986,9 @@@ static inline int bnx2x_func_start(stru
  	else /* CHIP_IS_E1X */
  		start_params->network_cos_mode = FW_WRR;
  
+ 	start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ 	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+ 
  	return bnx2x_func_state_change(bp, &func_params);
  }
  
@@@ -1409,4 -1402,8 +1415,8 @@@ static inline bool bnx2x_is_valid_ether
   *
   */
  void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+ 
+ int bnx2x_drain_tx_queues(struct bnx2x *bp);
+ void bnx2x_squeeze_objects(struct bnx2x *bp);
+ 
  #endif /* BNX2X_CMN_H */
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a8f1ee3,91a0434..725dc52
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@@ -75,8 -75,6 +75,6 @@@
  #define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
  #define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
  
- #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
- 
  /* Time in jiffies before concluding the transmitter is hung */
  #define TX_TIMEOUT		(5*HZ)
  
@@@ -2955,14 -2953,16 +2953,16 @@@ static unsigned long bnx2x_get_common_f
  	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
  
  	/* tx only connections collect statistics (on the same index as the
- 	 *  parent connection). The statistics are zeroed when the parent
- 	 *  connection is initialized.
+ 	 * parent connection). The statistics are zeroed when the parent
+ 	 * connection is initialized.
  	 */
  
  	__set_bit(BNX2X_Q_FLG_STATS, &flags);
  	if (zero_stats)
  		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
  
+ 	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ 	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
  
  #ifdef BNX2X_STOP_ON_ERROR
  	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@@ -3227,16 -3227,29 +3227,29 @@@ static void bnx2x_drv_info_ether_stat(s
  {
  	struct eth_stats_info *ether_stat =
  		&bp->slowpath->drv_info_to_mcp.ether_stat;
+ 	struct bnx2x_vlan_mac_obj *mac_obj =
+ 		&bp->sp_objs->mac_obj;
+ 	int i;
  
  	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
  		ETH_STAT_INFO_VERSION_LEN);
  
- 	bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- 					DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- 					ether_stat->mac_local);
- 
+ 	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ 	 * mac_local field in ether_stat struct. The base address is offset by 2
+ 	 * bytes to account for the field being 8 bytes but a mac address is
+ 	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
+ 	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
+ 	 * allocated by the ether_stat struct, so the macs will land in their
+ 	 * proper positions.
+ 	 */
+ 	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ 		memset(ether_stat->mac_local + i, 0,
+ 		       sizeof(ether_stat->mac_local[0]));
+ 	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ 				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ 				ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ 				ETH_ALEN);
  	ether_stat->mtu_size = bp->dev->mtu;
- 
  	if (bp->dev->features & NETIF_F_RXCSUM)
  		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
  	if (bp->dev->features & NETIF_F_TSO)
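
The comment above describes how six-byte MAC addresses are packed into eight-byte slots: each destination entry is zeroed first, then filled starting MAC_PAD (two) bytes into the slot, with an eight-byte stride between entries. A standalone sketch of that layout, using a hypothetical two-entry array:

#include <stdio.h>
#include <string.h>

#define MAC_PAD  2	/* assumed: 8-byte slot minus 6-byte MAC */
#define ETH_ALEN 6

int main(void)
{
	unsigned char mac_local[2][8];	/* hypothetical stat slots */
	const unsigned char mac0[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	unsigned int i, j;

	memset(mac_local, 0, sizeof(mac_local));
	/* write the MAC starting MAC_PAD bytes into its 8-byte slot */
	memcpy(mac_local[0] + MAC_PAD, mac0, ETH_ALEN);

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%02x ", mac_local[i][j]);
		printf("\n");
	}
	return 0;
}
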
@@@ -3258,8 -3271,7 +3271,7 @@@ static void bnx2x_drv_info_fcoe_stat(st
  	if (!CNIC_LOADED(bp))
  		return;
  
- 	memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- 	       bp->fip_mac, ETH_ALEN);
+ 	memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
  
  	fcoe_stat->qos_priority =
  		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@@ -3361,8 -3373,8 +3373,8 @@@ static void bnx2x_drv_info_iscsi_stat(s
  	if (!CNIC_LOADED(bp))
  		return;
  
- 	memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- 	       bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ 	memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ 	       ETH_ALEN);
  
  	iscsi_stat->qos_priority =
  		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@@ -6018,11 -6030,10 +6030,11 @@@ void bnx2x_nic_init_cnic(struct bnx2x *
  	mmiowb();
  }
  
 -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 +void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
  {
  	int i;
  
 +	/* Setup NIC internals and enable interrupts */
  	for_each_eth_queue(bp, i)
  		bnx2x_init_eth_fp(bp, i);
  
@@@ -6030,22 -6041,19 +6042,24 @@@
  	rmb();
  	bnx2x_init_rx_rings(bp);
  	bnx2x_init_tx_rings(bp);
 -	if (IS_VF(bp)) {
 +
 +	if (IS_PF(bp)) {
 +		/* Initialize MOD_ABS interrupts */
 +		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 +				       bp->common.shmem_base,
 +				       bp->common.shmem2_base, BP_PORT(bp));
 +
 +		/* initialize the default status block and sp ring */
 +		bnx2x_init_def_sb(bp);
 +		bnx2x_update_dsb_idx(bp);
 +		bnx2x_init_sp_ring(bp);
++	} else {
+ 		bnx2x_memset_stats(bp);
 -		return;
  	}
 +}
  
 -	/* Initialize MOD_ABS interrupts */
 -	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 -			       bp->common.shmem_base, bp->common.shmem2_base,
 -			       BP_PORT(bp));
 -
 -	bnx2x_init_def_sb(bp);
 -	bnx2x_update_dsb_idx(bp);
 -	bnx2x_init_sp_ring(bp);
 +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
 +{
  	bnx2x_init_eq_ring(bp);
  	bnx2x_init_internal(bp, load_code);
  	bnx2x_pf_init(bp);
@@@ -6063,7 -6071,12 +6077,7 @@@
  				   AEU_INPUTS_ATTN_BITS_SPIO5);
  }
  
 -/* end of nic init */
 -
 -/*
 - * gzip service functions
 - */
 -
 +/* gzip service functions */
  static int bnx2x_gunzip_init(struct bnx2x *bp)
  {
  	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@@ -7773,7 -7786,7 +7787,7 @@@ int bnx2x_alloc_mem_cnic(struct bnx2x *
  				sizeof(struct
  				       host_hc_status_block_e1x));
  
- 	if (CONFIGURE_NIC_MODE(bp))
+ 	if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
  		/* allocate searcher T2 table, as it wasn't allocated before */
  		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  
@@@ -7796,7 -7809,7 +7810,7 @@@ int bnx2x_alloc_mem(struct bnx2x *bp
  {
  	int i, allocated, context_size;
  
- 	if (!CONFIGURE_NIC_MODE(bp))
+ 	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
  		/* allocate searcher T2 table */
  		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  
@@@ -7917,8 -7930,6 +7931,6 @@@ int bnx2x_del_all_macs(struct bnx2x *bp
  
  int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
  {
- 	unsigned long ramrod_flags = 0;
- 
  	if (is_zero_ether_addr(bp->dev->dev_addr) &&
  	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
  		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@@ -7926,12 -7937,18 +7938,18 @@@
  		return 0;
  	}
  
- 	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ 	if (IS_PF(bp)) {
+ 		unsigned long ramrod_flags = 0;
  
- 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- 	/* Eth MAC is set on RSS leading client (fp[0]) */
- 	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
- 				 set, BNX2X_ETH_MAC, &ramrod_flags);
+ 		DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ 		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+ 					 &bp->sp_objs->mac_obj, set,
+ 					 BNX2X_ETH_MAC, &ramrod_flags);
+ 	} else { /* vf */
+ 		return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+ 					     bp->fp->index, true);
+ 	}
  }
  
  int bnx2x_setup_leading(struct bnx2x *bp)
@@@ -9525,6 -9542,10 +9543,10 @@@ sp_rtnl_not_reset
  		bnx2x_vfpf_storm_rx_mode(bp);
  	}
  
+ 	if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ 			       &bp->sp_rtnl_state))
+ 		bnx2x_pf_set_vfs_vlan(bp);
+ 
  	/* work which needs rtnl lock not-taken (as it takes the lock itself and
  	 * can be called from other contexts as well)
  	 */
@@@ -9532,8 -9553,10 +9554,10 @@@
  
  	/* enable SR-IOV if applicable */
  	if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- 					       &bp->sp_rtnl_state))
+ 					       &bp->sp_rtnl_state)) {
+ 		bnx2x_disable_sriov(bp);
  		bnx2x_enable_sriov(bp);
+ 	}
  }
  
  static void bnx2x_period_task(struct work_struct *work)
@@@ -9701,6 -9724,31 +9725,31 @@@ static struct bnx2x_prev_path_list 
  	return NULL;
  }
  
+ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+ {
+ 	struct bnx2x_prev_path_list *tmp_list;
+ 	int rc;
+ 
+ 	rc = down_interruptible(&bnx2x_prev_sem);
+ 	if (rc) {
+ 		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ 		return rc;
+ 	}
+ 
+ 	tmp_list = bnx2x_prev_path_get_entry(bp);
+ 	if (tmp_list) {
+ 		tmp_list->aer = 1;
+ 		rc = 0;
+ 	} else {
+ 		BNX2X_ERR("path %d: Entry does not exist for eeh; flow occurs before initial insmod is over?\n",
+ 			  BP_PATH(bp));
+ 	}
+ 
+ 	up(&bnx2x_prev_sem);
+ 
+ 	return rc;
+ }
+ 
  static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
  {
  	struct bnx2x_prev_path_list *tmp_list;
@@@ -9709,14 -9757,15 +9758,15 @@@
  	if (down_trylock(&bnx2x_prev_sem))
  		return false;
  
- 	list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- 		if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- 		    bp->pdev->bus->number == tmp_list->bus &&
- 		    BP_PATH(bp) == tmp_list->path) {
+ 	tmp_list = bnx2x_prev_path_get_entry(bp);
+ 	if (tmp_list) {
+ 		if (tmp_list->aer) {
+ 			DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ 			   BP_PATH(bp));
+ 		} else {
  			rc = true;
  			BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
  				       BP_PATH(bp));
- 			break;
  		}
  	}
  
@@@ -9730,6 -9779,28 +9780,28 @@@ static int bnx2x_prev_mark_path(struct 
  	struct bnx2x_prev_path_list *tmp_list;
  	int rc;
  
+ 	rc = down_interruptible(&bnx2x_prev_sem);
+ 	if (rc) {
+ 		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ 		return rc;
+ 	}
+ 
+ 	/* Check whether the entry for this path already exists */
+ 	tmp_list = bnx2x_prev_path_get_entry(bp);
+ 	if (tmp_list) {
+ 		if (!tmp_list->aer) {
+ 			BNX2X_ERR("Re-Marking the path.\n");
+ 		} else {
+ 			DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ 			   BP_PATH(bp));
+ 			tmp_list->aer = 0;
+ 		}
+ 		up(&bnx2x_prev_sem);
+ 		return 0;
+ 	}
+ 	up(&bnx2x_prev_sem);
+ 
+ 	/* Create an entry for this path and add it */
  	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
  	if (!tmp_list) {
  		BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@@ -9739,6 -9810,7 +9811,7 @@@
  	tmp_list->bus = bp->pdev->bus->number;
  	tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
  	tmp_list->path = BP_PATH(bp);
+ 	tmp_list->aer = 0;
  	tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
  
  	rc = down_interruptible(&bnx2x_prev_sem);
@@@ -9746,8 -9818,8 +9819,8 @@@
  		BNX2X_ERR("Received %d when tried to take lock\n", rc);
  		kfree(tmp_list);
  	} else {
- 		BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- 				BP_PATH(bp));
+ 		DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ 		   BP_PATH(bp));
  		list_add(&tmp_list->list, &bnx2x_prev_list);
  		up(&bnx2x_prev_sem);
  	}
@@@ -9990,6 -10062,7 +10063,7 @@@ static int bnx2x_prev_unload(struct bnx
  	}
  
  	do {
+ 		int aer = 0;
  		/* Lock MCP using an unload request */
  		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
  		if (!fw) {
@@@ -9998,7 -10071,18 +10072,18 @@@
  			break;
  		}
  
- 		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ 		rc = down_interruptible(&bnx2x_prev_sem);
+ 		if (rc) {
+ 			BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ 				  rc);
+ 		} else {
+ 			/* If Path is marked by EEH, ignore unload status */
+ 			aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ 				 bnx2x_prev_path_get_entry(bp)->aer);
+ 			up(&bnx2x_prev_sem);
+ 		}
+ 
+ 		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
  			rc = bnx2x_prev_unload_common(bp);
  			break;
  		}
@@@ -10038,8 -10122,12 +10123,12 @@@ static void bnx2x_get_common_hwinfo(str
  	id = ((val & 0xffff) << 16);
  	val = REG_RD(bp, MISC_REG_CHIP_REV);
  	id |= ((val & 0xf) << 12);
- 	val = REG_RD(bp, MISC_REG_CHIP_METAL);
- 	id |= ((val & 0xff) << 4);
+ 
+ 	/* Metal is read from PCI regs, but we can't access >=0x400 from
+ 	 * the configuration space (so we need to reg_rd)
+ 	 */
+ 	val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ 	id |= (((val >> 24) & 0xf) << 4);
  	val = REG_RD(bp, MISC_REG_BOND_ID);
  	id |= (val & 0xf);
  	bp->common.chip_id = id;
@@@ -10703,6 -10791,12 +10792,12 @@@ static void bnx2x_get_fcoe_info(struct 
  		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
  		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
  
+ 	/* Calculate the number of maximum allowed FCoE tasks */
+ 	bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+ 	if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+ 		bp->cnic_eth_dev.max_fcoe_exchanges /=
+ 						MAX_FCOE_FUNCS_PER_ENGINE;
+ 
  	/* Read the WWN: */
  	if (!IS_MF(bp)) {
  		/* Port info */
@@@ -10816,14 -10910,12 +10911,12 @@@ static void bnx2x_get_cnic_mac_hwinfo(s
  			}
  		}
  
- 		if (IS_MF_STORAGE_SD(bp))
- 			/* Zero primary MAC configuration */
- 			memset(bp->dev->dev_addr, 0, ETH_ALEN);
- 
- 		if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- 			/* use FIP MAC as primary MAC */
+ 		/* If this is a storage-only interface, use SAN mac as
+ 		 * primary MAC. Notice that for SD this is already the case,
+ 		 * as the SAN mac was copied from the primary MAC.
+ 		 */
+ 		if (IS_MF_FCOE_AFEX(bp))
  			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
- 
  	} else {
  		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
  				iscsi_mac_upper);
@@@ -11060,6 -11152,9 +11153,9 @@@ static int bnx2x_get_hwinfo(struct bnx2
  				} else
  					BNX2X_DEV_INFO("illegal OV for SD\n");
  				break;
+ 			case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ 				bp->mf_config[vn] = 0;
+ 				break;
  			default:
  				/* Unknown configuration: reset mf_config */
  				bp->mf_config[vn] = 0;
@@@ -11406,26 -11501,6 +11502,6 @@@ static int bnx2x_init_bp(struct bnx2x *
   * net_device service functions
   */
  
- static int bnx2x_open_epilog(struct bnx2x *bp)
- {
- 	/* Enable sriov via delayed work. This must be done via delayed work
- 	 * because it causes the probe of the vf devices to be run, which invoke
- 	 * register_netdevice which must have rtnl lock taken. As we are holding
- 	 * the lock right now, that could only work if the probe would not take
- 	 * the lock. However, as the probe of the vf may be called from other
- 	 * contexts as well (such as passthrough to vm failes) it can't assume
- 	 * the lock is being held for it. Using delayed work here allows the
- 	 * probe code to simply take the lock (i.e. wait for it to be released
- 	 * if it is being held).
- 	 */
- 	smp_mb__before_clear_bit();
- 	set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- 	smp_mb__after_clear_bit();
- 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
- 
- 	return 0;
- }
- 
  /* called with rtnl_lock */
  static int bnx2x_open(struct net_device *dev)
  {
@@@ -11795,6 -11870,8 +11871,8 @@@ static const struct net_device_ops bnx2
  	.ndo_setup_tc		= bnx2x_setup_tc,
  #ifdef CONFIG_BNX2X_SRIOV
  	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
+ 	.ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
+ 	.ndo_get_vf_config	= bnx2x_get_vf_config,
  #endif
  #ifdef NETDEV_FCOE_WWNN
  	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
@@@ -11957,19 -12034,26 +12035,26 @@@ static int bnx2x_init_dev(struct bnx2x 
  	dev->watchdog_timeo = TX_TIMEOUT;
  
  	dev->netdev_ops = &bnx2x_netdev_ops;
- 	bnx2x_set_ethtool_ops(dev);
+ 	bnx2x_set_ethtool_ops(bp, dev);
  
  	dev->priv_flags |= IFF_UNICAST_FLT;
  
  	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
  		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
- 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+ 	if (!CHIP_IS_E1x(bp)) {
+ 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ 		dev->hw_enc_features =
+ 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ 			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ 			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ 	}
  
  	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
  
- 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
  	if (bp->flags & USING_DAC_FLAG)
  		dev->features |= NETIF_F_HIGHDMA;
  
@@@ -12451,7 -12535,7 +12536,7 @@@ static int bnx2x_init_one(struct pci_de
  	 * l2 connections.
  	 */
  	if (IS_VF(bp)) {
- 		bnx2x_vf_map_doorbells(bp);
+ 		bp->doorbells = bnx2x_vf_doorbells(bp);
  		rc = bnx2x_vf_pci_alloc(bp);
  		if (rc)
  			goto init_one_exit;
@@@ -12479,13 -12563,8 +12564,8 @@@
  			goto init_one_exit;
  	}
  
- 	/* Enable SRIOV if capability found in configuration space.
- 	 * Once the generic SR-IOV framework makes it in from the
- 	 * pci tree this will be revised, to allow dynamic control
- 	 * over the number of VFs. Right now, change the num of vfs
- 	 * param below to enable SR-IOV.
- 	 */
- 	rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ 	/* Enable SRIOV if capability found in configuration space */
+ 	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
  	if (rc)
  		goto init_one_exit;
  
@@@ -12497,16 -12576,6 +12577,6 @@@
  	if (CHIP_IS_E1x(bp))
  		bp->flags |= NO_FCOE_FLAG;
  
- 	/* disable FCOE for 57840 device, until FW supports it */
- 	switch (ent->driver_data) {
- 	case BCM57840_O:
- 	case BCM57840_4_10:
- 	case BCM57840_2_20:
- 	case BCM57840_MFO:
- 	case BCM57840_MF:
- 		bp->flags |= NO_FCOE_FLAG;
- 	}
- 
  	/* Set bp->num_queues for MSI-X mode*/
  	bnx2x_set_num_queues(bp);
  
@@@ -12640,9 -12709,7 +12710,7 @@@ static void bnx2x_remove_one(struct pci
  
  static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
  {
- 	int i;
- 
- 	bp->state = BNX2X_STATE_ERROR;
+ 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
  
  	bp->rx_mode = BNX2X_RX_MODE_NONE;
  
@@@ -12651,29 -12718,21 +12719,21 @@@
  
  	/* Stop Tx */
  	bnx2x_tx_disable(bp);
- 
- 	bnx2x_netif_stop(bp, 0);
  	/* Delete all NAPI objects */
  	bnx2x_del_all_napi(bp);
  	if (CNIC_LOADED(bp))
  		bnx2x_del_all_napi_cnic(bp);
+ 	netdev_reset_tc(bp->dev);
  
  	del_timer_sync(&bp->timer);
+ 	cancel_delayed_work(&bp->sp_task);
+ 	cancel_delayed_work(&bp->period_task);
  
- 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- 
- 	/* Release IRQs */
- 	bnx2x_free_irq(bp);
- 
- 	/* Free SKBs, SGEs, TPA pool and driver internals */
- 	bnx2x_free_skbs(bp);
- 
- 	for_each_rx_queue(bp, i)
- 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- 
- 	bnx2x_free_mem(bp);
+ 	spin_lock_bh(&bp->stats_lock);
+ 	bp->stats_state = STATS_STATE_DISABLED;
+ 	spin_unlock_bh(&bp->stats_lock);
  
- 	bp->state = BNX2X_STATE_CLOSED;
+ 	bnx2x_save_statistics(bp);
  
  	netif_carrier_off(bp->dev);
  
@@@ -12709,6 -12768,8 +12769,8 @@@ static pci_ers_result_t bnx2x_io_error_
  
  	rtnl_lock();
  
+ 	BNX2X_ERR("IO error detected\n");
+ 
  	netif_device_detach(dev);
  
  	if (state == pci_channel_io_perm_failure) {
@@@ -12719,6 -12780,8 +12781,8 @@@
  	if (netif_running(dev))
  		bnx2x_eeh_nic_unload(bp);
  
+ 	bnx2x_prev_path_mark_eeh(bp);
+ 
  	pci_disable_device(pdev);
  
  	rtnl_unlock();
@@@ -12737,9 -12800,10 +12801,10 @@@ static pci_ers_result_t bnx2x_io_slot_r
  {
  	struct net_device *dev = pci_get_drvdata(pdev);
  	struct bnx2x *bp = netdev_priv(dev);
+ 	int i;
  
  	rtnl_lock();
- 
+ 	BNX2X_ERR("IO slot reset initializing...\n");
  	if (pci_enable_device(pdev)) {
  		dev_err(&pdev->dev,
  			"Cannot re-enable PCI device after reset\n");
@@@ -12749,10 -12813,47 +12814,47 @@@
  
  	pci_set_master(pdev);
  	pci_restore_state(pdev);
+ 	pci_save_state(pdev);
  
  	if (netif_running(dev))
  		bnx2x_set_power_state(bp, PCI_D0);
  
+ 	if (netif_running(dev)) {
+ 		BNX2X_ERR("IO slot reset --> driver unload\n");
+ 		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ 			u32 v;
+ 
+ 			v = SHMEM2_RD(bp,
+ 				      drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ 			SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ 				  v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ 		}
+ 		bnx2x_drain_tx_queues(bp);
+ 		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ 		bnx2x_netif_stop(bp, 1);
+ 		bnx2x_free_irq(bp);
+ 
+ 		/* Report UNLOAD_DONE to MCP */
+ 		bnx2x_send_unload_done(bp, true);
+ 
+ 		bp->sp_state = 0;
+ 		bp->port.pmf = 0;
+ 
+ 		bnx2x_prev_unload(bp);
+ 
+ 		/* We should have reset the engine, so it's fair to
+ 		 * assume the FW will no longer write to the bnx2x driver.
+ 		 */
+ 		bnx2x_squeeze_objects(bp);
+ 		bnx2x_free_skbs(bp);
+ 		for_each_rx_queue(bp, i)
+ 			bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ 		bnx2x_free_fp_mem(bp);
+ 		bnx2x_free_mem(bp);
+ 
+ 		bp->state = BNX2X_STATE_CLOSED;
+ 	}
+ 
  	rtnl_unlock();
  
  	return PCI_ERS_RESULT_RECOVERED;
@@@ -12779,6 -12880,9 +12881,9 @@@ static void bnx2x_io_resume(struct pci_
  
  	bnx2x_eeh_recover(bp);
  
+ 	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ 							DRV_MSG_SEQ_NUMBER_MASK;
+ 
  	if (netif_running(dev))
  		bnx2x_nic_load(bp, LOAD_NORMAL);
  
@@@ -12801,6 -12905,9 +12906,9 @@@ static struct pci_driver bnx2x_pci_driv
  	.suspend     = bnx2x_suspend,
  	.resume      = bnx2x_resume,
  	.err_handler = &bnx2x_err_handler,
+ #ifdef CONFIG_BNX2X_SRIOV
+ 	.sriov_configure = bnx2x_sriov_configure,
+ #endif
  };
  
  static int __init bnx2x_init(void)
diff --combined drivers/net/ethernet/emulex/benet/be.h
index 941aa1f,9045903..d74db93
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * Copyright (C) 2005 - 2011 Emulex
+  * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -214,6 -214,7 +214,7 @@@ struct be_tx_stats 
  };
  
  struct be_tx_obj {
+ 	u32 db_offset;
  	struct be_queue_info q;
  	struct be_queue_info cq;
  	/* Remember the skbs that were transmitted */
@@@ -292,7 -293,7 +293,7 @@@ struct be_drv_stats 
  	u32 rx_in_range_errors;
  	u32 rx_out_range_errors;
  	u32 rx_frame_too_long;
- 	u32 rx_address_mismatch_drops;
+ 	u32 rx_address_filtered;
  	u32 rx_dropped_too_small;
  	u32 rx_dropped_too_short;
  	u32 rx_dropped_header_too_small;
@@@ -328,7 -329,6 +329,7 @@@ enum vf_state 
  #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
  #define BE_UC_PMAC_COUNT		30
  #define BE_VF_UC_PMAC_COUNT		2
 +#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
  
  struct phy_info {
  	u8 transceiver;
@@@ -435,7 -435,7 +436,8 @@@ struct be_adapter 
  	u8 wol_cap;
  	bool wol;
  	u32 uc_macs;		/* Count of secondary UC MAC programmed */
 +	u16 qnq_vid;
+ 	u16 asic_rev;
  	u32 msg_enable;
  	int be_get_temp_freq;
  	u16 max_mcast_mac;
@@@ -447,6 -447,7 +449,7 @@@
  	u16 max_event_queues;
  	u32 if_cap_flags;
  	u8 pf_number;
+ 	u64 rss_flags;
  };
  
  #define be_physfn(adapter)		(!adapter->virtfn)
@@@ -650,11 -651,6 +653,11 @@@ static inline bool be_is_wol_excluded(s
  	}
  }
  
 +static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
 +{
 +	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 +}
 +
  extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
  		u16 num_popped);
  extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --combined drivers/net/ethernet/emulex/benet/be_cmds.c
index 24c80d1,9080c27..25d3290
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * Copyright (C) 2005 - 2011 Emulex
+  * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -263,27 -263,6 +263,27 @@@ static void be_async_grp5_evt_process(s
  	}
  }
  
 +static void be_async_dbg_evt_process(struct be_adapter *adapter,
 +		u32 trailer, struct be_mcc_compl *cmp)
 +{
 +	u8 event_type = 0;
 +	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
 +
 +	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
 +		ASYNC_TRAILER_EVENT_TYPE_MASK;
 +
 +	switch (event_type) {
 +	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
 +		if (evt->valid)
 +			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
 +		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 +	break;
 +	default:
 +		dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
 +	break;
 +	}
 +}
 +
  static inline bool is_link_state_evt(u32 trailer)
  {
  	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
@@@ -298,13 -277,6 +298,13 @@@ static inline bool is_grp5_evt(u32 trai
  				ASYNC_EVENT_CODE_GRP_5);
  }
  
 +static inline bool is_dbg_evt(u32 trailer)
 +{
 +	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 +		ASYNC_TRAILER_EVENT_CODE_MASK) ==
 +				ASYNC_EVENT_CODE_QNQ);
 +}
 +
  static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
  {
  	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
@@@ -353,9 -325,6 +353,9 @@@ int be_process_mcc(struct be_adapter *a
  			else if (is_grp5_evt(compl->flags))
  				be_async_grp5_evt_process(adapter,
  				compl->flags, compl);
 +			else if (is_dbg_evt(compl->flags))
 +				be_async_dbg_evt_process(adapter,
 +				compl->flags, compl);
  		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
  				status = be_mcc_compl_process(adapter, compl);
  				atomic_dec(&mcc_obj->q.used);
@@@ -718,10 -687,8 +718,8 @@@ static struct be_mcc_wrb *wrb_from_mccq
  	if (!mccq->created)
  		return NULL;
  
- 	if (atomic_read(&mccq->used) >= mccq->len) {
- 		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
+ 	if (atomic_read(&mccq->used) >= mccq->len)
  		return NULL;
- 	}
  
  	wrb = queue_head_node(mccq);
  	queue_head_inc(mccq);
@@@ -1053,7 -1020,6 +1051,7 @@@ int be_cmd_mccq_ext_create(struct be_ad
  
  	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
  	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
 +	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
  	be_dws_cpu_to_le(ctxt, sizeof(req->context));
  
  	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@@ -1127,15 -1093,14 +1125,14 @@@ int be_cmd_mccq_create(struct be_adapte
  	return status;
  }
  
- int be_cmd_txq_create(struct be_adapter *adapter,
- 			struct be_queue_info *txq,
- 			struct be_queue_info *cq)
+ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
  {
  	struct be_mcc_wrb *wrb;
  	struct be_cmd_req_eth_tx_create *req;
+ 	struct be_queue_info *txq = &txo->q;
+ 	struct be_queue_info *cq = &txo->cq;
  	struct be_dma_mem *q_mem = &txq->dma_mem;
- 	void *ctxt;
- 	int status;
+ 	int status, ver = 0;
  
  	spin_lock_bh(&adapter->mcc_lock);
  
@@@ -1146,34 -1111,37 +1143,37 @@@
  	}
  
  	req = embedded_payload(wrb);
- 	ctxt = &req->context;
  
  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
  
  	if (lancer_chip(adapter)) {
  		req->hdr.version = 1;
- 		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
- 					adapter->if_handle);
+ 		req->if_id = cpu_to_le16(adapter->if_handle);
+ 	} else if (BEx_chip(adapter)) {
+ 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
+ 			req->hdr.version = 2;
+ 	} else { /* For SH */
+ 		req->hdr.version = 2;
  	}
  
  	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
  	req->ulp_num = BE_ULP1_NUM;
  	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
- 
- 	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
- 		be_encoded_q_len(txq->len));
- 	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
- 	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
- 
- 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
- 
+ 	req->cq_id = cpu_to_le16(cq->id);
+ 	req->queue_size = be_encoded_q_len(txq->len);
  	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  
+ 	ver = req->hdr.version;
+ 
  	status = be_mcc_notify_wait(adapter);
  	if (!status) {
  		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
  		txq->id = le16_to_cpu(resp->cid);
+ 		if (ver == 2)
+ 			txo->db_offset = le32_to_cpu(resp->db_offset);
+ 		else
+ 			txo->db_offset = DB_TXULP1_OFFSET;
  		txq->created = true;
  	}
  
@@@ -1866,7 -1834,7 +1866,7 @@@ err
  
  /* Uses mbox */
  int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
- 		u32 *mode, u32 *caps)
+ 			u32 *mode, u32 *caps, u16 *asic_rev)
  {
  	struct be_mcc_wrb *wrb;
  	struct be_cmd_req_query_fw_cfg *req;
@@@ -1887,6 -1855,7 +1887,7 @@@
  		*port_num = le32_to_cpu(resp->phys_port);
  		*mode = le32_to_cpu(resp->function_mode);
  		*caps = le32_to_cpu(resp->function_caps);
+ 		*asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
  	}
  
  	mutex_unlock(&adapter->mbox_lock);
@@@ -1929,7 -1898,8 +1930,8 @@@ int be_cmd_reset_function(struct be_ada
  	return status;
  }
  
- int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ 			u32 rss_hash_opts, u16 table_size)
  {
  	struct be_mcc_wrb *wrb;
  	struct be_cmd_req_rss_config *req;
@@@ -1948,16 -1918,12 +1950,12 @@@
  		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
  
  	req->if_id = cpu_to_le32(adapter->if_handle);
- 	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- 				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+ 	req->enable_rss = cpu_to_le16(rss_hash_opts);
+ 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
  
- 	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ 	if (lancer_chip(adapter) || skyhawk_chip(adapter))
  		req->hdr.version = 1;
- 		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
- 					       RSS_ENABLE_UDP_IPV6);
- 	}
  
- 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
  	memcpy(req->cpu_table, rsstable, table_size);
  	memcpy(req->hash, myhash, sizeof(myhash));
  	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
@@@ -2375,7 -2341,6 +2373,6 @@@ int be_cmd_get_seeprom_data(struct be_a
  {
  	struct be_mcc_wrb *wrb;
  	struct be_cmd_req_seeprom_read *req;
- 	struct be_sge *sge;
  	int status;
  
  	spin_lock_bh(&adapter->mcc_lock);
@@@ -2386,7 -2351,6 +2383,6 @@@
  		goto err;
  	}
  	req = nonemb_cmd->va;
- 	sge = nonembedded_sgl(wrb);
  
  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
@@@ -2493,9 -2457,6 +2489,9 @@@ int be_cmd_get_cntl_attributes(struct b
  	struct mgmt_controller_attrib *attribs;
  	struct be_dma_mem attribs_cmd;
  
 +	if (mutex_lock_interruptible(&adapter->mbox_lock))
 +		return -1;
 +
  	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
  	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
  	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
@@@ -2503,10 -2464,12 +2499,10 @@@
  	if (!attribs_cmd.va) {
  		dev_err(&adapter->pdev->dev,
  				"Memory allocation failure\n");
 -		return -ENOMEM;
 +		status = -ENOMEM;
 +		goto err;
  	}
  
 -	if (mutex_lock_interruptible(&adapter->mbox_lock))
 -		return -1;
 -
  	wrb = wrb_from_mbox(adapter);
  	if (!wrb) {
  		status = -EBUSY;
@@@ -2526,9 -2489,8 +2522,9 @@@
  
  err:
  	mutex_unlock(&adapter->mbox_lock);
 -	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
 -					attribs_cmd.dma);
 +	if (attribs_cmd.va)
 +		pci_free_consistent(adapter->pdev, attribs_cmd.size,
 +				    attribs_cmd.va, attribs_cmd.dma);
  	return status;
  }
  
@@@ -2701,10 -2663,8 +2697,8 @@@ int be_cmd_set_mac_list(struct be_adapt
  	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
  	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
  			&cmd.dma, GFP_KERNEL);
- 	if (!cmd.va) {
- 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ 	if (!cmd.va)
  		return -ENOMEM;
- 	}
  
  	spin_lock_bh(&adapter->mcc_lock);
  
@@@ -2828,9 -2788,6 +2822,9 @@@ int be_cmd_get_acpi_wol_cap(struct be_a
  			    CMD_SUBSYSTEM_ETH))
  		return -EPERM;
  
 +	if (mutex_lock_interruptible(&adapter->mbox_lock))
 +		return -1;
 +
  	memset(&cmd, 0, sizeof(struct be_dma_mem));
  	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
  	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@@ -2838,10 -2795,12 +2832,10 @@@
  	if (!cmd.va) {
  		dev_err(&adapter->pdev->dev,
  				"Memory allocation failure\n");
 -		return -ENOMEM;
 +		status = -ENOMEM;
 +		goto err;
  	}
  
 -	if (mutex_lock_interruptible(&adapter->mbox_lock))
 -		return -1;
 -
  	wrb = wrb_from_mbox(adapter);
  	if (!wrb) {
  		status = -EBUSY;
@@@ -2872,8 -2831,7 +2866,8 @@@
  	}
  err:
  	mutex_unlock(&adapter->mbox_lock);
 -	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 +	if (cmd.va)
 +		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
  	return status;
  
  }
@@@ -2978,14 -2936,15 +2972,15 @@@ static struct be_nic_resource_desc *be_
  	int i;
  
  	for (i = 0; i < desc_count; i++) {
- 		desc->desc_len = RESOURCE_DESC_SIZE;
+ 		desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
  		if (((void *)desc + desc->desc_len) >
  		    (void *)(buf + max_buf_size)) {
  			desc = NULL;
  			break;
  		}
  
- 		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
+ 		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ 		    desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
  			break;
  
  		desc = (void *)desc + desc->desc_len;
@@@ -3005,18 -2964,16 +3000,18 @@@ int be_cmd_get_func_config(struct be_ad
  	int status;
  	struct be_dma_mem cmd;
  
 +	if (mutex_lock_interruptible(&adapter->mbox_lock))
 +		return -1;
 +
  	memset(&cmd, 0, sizeof(struct be_dma_mem));
  	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
  	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
  				      &cmd.dma);
  	if (!cmd.va) {
  		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 -		return -ENOMEM;
 +		status = -ENOMEM;
 +		goto err;
  	}
 -	if (mutex_lock_interruptible(&adapter->mbox_lock))
 -		return -1;
  
  	wrb = wrb_from_mbox(adapter);
  	if (!wrb) {
@@@ -3030,6 -2987,9 +3025,9 @@@
  			       OPCODE_COMMON_GET_FUNC_CONFIG,
  			       cmd.size, wrb, &cmd);
  
+ 	if (skyhawk_chip(adapter))
+ 		req->hdr.version = 1;
+ 
  	status = be_mbox_notify_wait(adapter);
  	if (!status) {
  		struct be_cmd_resp_get_func_config *resp = cmd.va;
@@@ -3056,28 -3016,46 +3054,46 @@@
  	}
  err:
  	mutex_unlock(&adapter->mbox_lock);
 -	pci_free_consistent(adapter->pdev, cmd.size,
 -			    cmd.va, cmd.dma);
 +	if (cmd.va)
 +		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
  	return status;
  }
  
-  /* Uses sync mcc */
- int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- 			      u8 domain)
+ /* Uses mbox */
+ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ 				   u8 domain, struct be_dma_mem *cmd)
  {
  	struct be_mcc_wrb *wrb;
  	struct be_cmd_req_get_profile_config *req;
  	int status;
- 	struct be_dma_mem cmd;
  
- 	memset(&cmd, 0, sizeof(struct be_dma_mem));
- 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- 				      &cmd.dma);
- 	if (!cmd.va) {
- 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
- 		return -ENOMEM;
- 	}
+ 	if (mutex_lock_interruptible(&adapter->mbox_lock))
+ 		return -1;
+ 	wrb = wrb_from_mbox(adapter);
+ 
+ 	req = cmd->va;
+ 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
+ 			       cmd->size, wrb, cmd);
+ 
+ 	req->type = ACTIVE_PROFILE_TYPE;
+ 	req->hdr.domain = domain;
+ 	if (!lancer_chip(adapter))
+ 		req->hdr.version = 1;
+ 
+ 	status = be_mbox_notify_wait(adapter);
+ 
+ 	mutex_unlock(&adapter->mbox_lock);
+ 	return status;
+ }
+ 
+ /* Uses sync mcc */
+ int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ 				   u8 domain, struct be_dma_mem *cmd)
+ {
+ 	struct be_mcc_wrb *wrb;
+ 	struct be_cmd_req_get_profile_config *req;
+ 	int status;
  
  	spin_lock_bh(&adapter->mcc_lock);
  
@@@ -3087,16 -3065,47 +3103,47 @@@
  		goto err;
  	}
  
- 	req = cmd.va;
- 
+ 	req = cmd->va;
  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  			       OPCODE_COMMON_GET_PROFILE_CONFIG,
- 			       cmd.size, wrb, &cmd);
+ 			       cmd->size, wrb, cmd);
  
  	req->type = ACTIVE_PROFILE_TYPE;
  	req->hdr.domain = domain;
+ 	if (!lancer_chip(adapter))
+ 		req->hdr.version = 1;
  
  	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+ /* Uses sync mcc if MCCQ is already created, otherwise mbox */
+ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ 			      u16 *txq_count, u8 domain)
+ {
+ 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ 	struct be_dma_mem cmd;
+ 	int status;
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	if (!lancer_chip(adapter))
+ 		cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
+ 	else
+ 		cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ 				      &cmd.dma);
+ 	if (!cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ 		return -ENOMEM;
+ 	}
+ 
+ 	if (!mccq->created)
+ 		status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
+ 	else
+ 		status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
  	if (!status) {
  		struct be_cmd_resp_get_profile_config *resp = cmd.va;
  		u32 desc_count = le32_to_cpu(resp->desc_count);
@@@ -3109,12 -3118,15 +3156,15 @@@
  			status = -EINVAL;
  			goto err;
  		}
- 		*cap_flags = le32_to_cpu(desc->cap_flags);
+ 		if (cap_flags)
+ 			*cap_flags = le32_to_cpu(desc->cap_flags);
+ 		if (txq_count)
+ 			*txq_count = le32_to_cpu(desc->txq_count);
  	}
  err:
- 	spin_unlock_bh(&adapter->mcc_lock);
- 	pci_free_consistent(adapter->pdev, cmd.size,
- 			    cmd.va, cmd.dma);
+ 	if (cmd.va)
+ 		pci_free_consistent(adapter->pdev, cmd.size,
+ 				    cmd.va, cmd.dma);
  	return status;
  }
  
@@@ -3143,7 -3155,7 +3193,7 @@@ int be_cmd_set_profile_config(struct be
  	req->hdr.domain = domain;
  	req->desc_count = cpu_to_le32(1);
  
- 	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
+ 	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
  	req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
  	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
  	req->nic_desc.pf_num = adapter->pf_number;
@@@ -3240,6 -3252,31 +3290,31 @@@ err
  	return status;
  }
  
+ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
+ {
+ 	struct be_mcc_wrb *wrb;
+ 	struct be_cmd_req_intr_set *req;
+ 	int status;
+ 
+ 	if (mutex_lock_interruptible(&adapter->mbox_lock))
+ 		return -1;
+ 
+ 	wrb = wrb_from_mbox(adapter);
+ 
+ 	req = embedded_payload(wrb);
+ 
+ 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
+ 			       wrb, NULL);
+ 
+ 	req->intr_enabled = intr_enable;
+ 
+ 	status = be_mbox_notify_wait(adapter);
+ 
+ 	mutex_unlock(&adapter->mbox_lock);
+ 	return status;
+ }
+ 
  int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
  			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
  {
diff --combined drivers/net/ethernet/emulex/benet/be_cmds.h
index 07fd927,1b01e9b..a855668
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * Copyright (C) 2005 - 2011 Emulex
+  * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -84,9 -84,6 +84,9 @@@ struct be_mcc_compl 
  #define ASYNC_EVENT_QOS_SPEED		0x1
  #define ASYNC_EVENT_COS_PRIORITY	0x2
  #define ASYNC_EVENT_PVID_STATE		0x3
 +#define ASYNC_EVENT_CODE_QNQ		0x6
 +#define ASYNC_DEBUG_EVENT_TYPE_QNQ	1
 +
  struct be_async_event_trailer {
  	u32 code;
  };
@@@ -147,16 -144,6 +147,16 @@@ struct be_async_event_grp5_pvid_state 
  	struct be_async_event_trailer trailer;
  } __packed;
  
 +/* async event indicating outer VLAN tag in QnQ */
 +struct be_async_event_qnq {
 +	u8 valid;	/* Indicates if outer VLAN is valid */
 +	u8 rsvd0;
 +	u16 vlan_tag;
 +	u32 event_tag;
 +	u8 rsvd1[4];
 +	struct be_async_event_trailer trailer;
 +} __packed;
 +
  struct be_mcc_mailbox {
  	struct be_mcc_wrb wrb;
  	struct be_mcc_compl compl;
@@@ -201,6 -188,7 +201,7 @@@
  #define OPCODE_COMMON_GET_BEACON_STATE			70
  #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
  #define OPCODE_COMMON_GET_PORT_NAME			77
+ #define OPCODE_COMMON_SET_INTERRUPT_ENABLE		89
  #define OPCODE_COMMON_GET_PHY_DETAILS			102
  #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP		103
  #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
@@@ -486,46 -474,27 +487,27 @@@ struct be_cmd_resp_mcc_create 
  #define BE_ETH_TX_RING_TYPE_STANDARD    	2
  #define BE_ULP1_NUM				1
  
- /* Pseudo amap definition in which each bit of the actual structure is defined
-  * as a byte: used to calculate offset/shift/mask of each field */
- struct amap_tx_context {
- 	u8 if_id[16];		/* dword 0 */
- 	u8 tx_ring_size[4];	/* dword 0 */
- 	u8 rsvd1[26];		/* dword 0 */
- 	u8 pci_func_id[8];	/* dword 1 */
- 	u8 rsvd2[9];		/* dword 1 */
- 	u8 ctx_valid;		/* dword 1 */
- 	u8 cq_id_send[16];	/* dword 2 */
- 	u8 rsvd3[16];		/* dword 2 */
- 	u8 rsvd4[32];		/* dword 3 */
- 	u8 rsvd5[32];		/* dword 4 */
- 	u8 rsvd6[32];		/* dword 5 */
- 	u8 rsvd7[32];		/* dword 6 */
- 	u8 rsvd8[32];		/* dword 7 */
- 	u8 rsvd9[32];		/* dword 8 */
- 	u8 rsvd10[32];		/* dword 9 */
- 	u8 rsvd11[32];		/* dword 10 */
- 	u8 rsvd12[32];		/* dword 11 */
- 	u8 rsvd13[32];		/* dword 12 */
- 	u8 rsvd14[32];		/* dword 13 */
- 	u8 rsvd15[32];		/* dword 14 */
- 	u8 rsvd16[32];		/* dword 15 */
- } __packed;
- 
  struct be_cmd_req_eth_tx_create {
  	struct be_cmd_req_hdr hdr;
  	u8 num_pages;
  	u8 ulp_num;
- 	u8 type;
- 	u8 bound_port;
- 	u8 context[sizeof(struct amap_tx_context) / 8];
+ 	u16 type;
+ 	u16 if_id;
+ 	u8 queue_size;
+ 	u8 rsvd0;
+ 	u32 rsvd1;
+ 	u16 cq_id;
+ 	u16 rsvd2;
+ 	u32 rsvd3[13];
  	struct phys_addr pages[8];
  } __packed;
  
  struct be_cmd_resp_eth_tx_create {
  	struct be_cmd_resp_hdr hdr;
  	u16 cid;
- 	u16 rsvd0;
+ 	u16 rid;
+ 	u32 db_offset;
+ 	u32 rsvd0[4];
  } __packed;
  
  /******************** Create RxQ ***************************/
@@@ -621,8 -590,8 +603,8 @@@ struct be_port_rxf_stats_v0 
  	u32 rx_in_range_errors;	/* dword 10*/
  	u32 rx_out_range_errors;	/* dword 11*/
  	u32 rx_frame_too_long;	/* dword 12*/
- 	u32 rx_address_mismatch_drops;	/* dword 13*/
- 	u32 rx_vlan_mismatch_drops;	/* dword 14*/
+ 	u32 rx_address_filtered;	/* dword 13*/
+ 	u32 rx_vlan_filtered;	/* dword 14*/
  	u32 rx_dropped_too_small;	/* dword 15*/
  	u32 rx_dropped_too_short;	/* dword 16*/
  	u32 rx_dropped_header_too_small;	/* dword 17*/
@@@ -828,8 -797,8 +810,8 @@@ struct lancer_pport_stats 
  	u32 rx_control_frames_unknown_opcode_hi;
  	u32 rx_in_range_errors;
  	u32 rx_out_of_range_errors;
- 	u32 rx_address_mismatch_drops;
- 	u32 rx_vlan_mismatch_drops;
+ 	u32 rx_address_filtered;
+ 	u32 rx_vlan_filtered;
  	u32 rx_dropped_too_small;
  	u32 rx_dropped_too_short;
  	u32 rx_dropped_header_too_small;
@@@ -1079,7 -1048,6 +1061,6 @@@ struct be_cmd_resp_modify_eq_delay 
  } __packed;
  
  /******************** Get FW Config *******************/
- #define BE_FUNCTION_CAPS_RSS			0x2
  /* The HW can come up in either of the following multi-channel modes
   * based on the skew/IPL.
   */
@@@ -1122,6 -1090,9 +1103,9 @@@ struct be_cmd_resp_query_fw_cfg 
  #define RSS_ENABLE_UDP_IPV4			0x10
  #define RSS_ENABLE_UDP_IPV6			0x20
  
+ #define L3_RSS_FLAGS				(RXH_IP_DST | RXH_IP_SRC)
+ #define L4_RSS_FLAGS				(RXH_L4_B_0_1 | RXH_L4_B_2_3)
+ 
  struct be_cmd_req_rss_config {
  	struct be_cmd_req_hdr hdr;
  	u32 if_id;
@@@ -1605,7 -1576,7 +1589,7 @@@ struct be_port_rxf_stats_v1 
  	u32 rx_in_range_errors;
  	u32 rx_out_range_errors;
  	u32 rx_frame_too_long;
- 	u32 rx_address_mismatch_drops;
+ 	u32 rx_address_filtered;
  	u32 rx_dropped_too_small;
  	u32 rx_dropped_too_short;
  	u32 rx_dropped_header_too_small;
@@@ -1719,9 -1690,11 +1703,11 @@@ struct be_cmd_req_set_ext_fat_caps 
  	struct be_fat_conf_params set_params;
  };
  
- #define RESOURCE_DESC_SIZE			72
- #define NIC_RESOURCE_DESC_TYPE_ID		0x41
+ #define RESOURCE_DESC_SIZE			88
+ #define NIC_RESOURCE_DESC_TYPE_V0		0x41
+ #define NIC_RESOURCE_DESC_TYPE_V1		0x51
  #define MAX_RESOURCE_DESC			4
+ #define MAX_RESOURCE_DESC_V1			32
  
  /* QOS unit number */
  #define QUN					4
@@@ -1768,7 -1741,7 +1754,7 @@@ struct be_cmd_req_get_func_config 
  };
  
  struct be_cmd_resp_get_func_config {
- 	struct be_cmd_req_hdr hdr;
+ 	struct be_cmd_resp_hdr hdr;
  	u32 desc_count;
  	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
  };
@@@ -1787,6 -1760,12 +1773,12 @@@ struct be_cmd_resp_get_profile_config 
  	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
  };
  
+ struct be_cmd_resp_get_profile_config_v1 {
+ 	struct be_cmd_req_hdr hdr;
+ 	u32 desc_count;
+ 	u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+ };
+ 
  struct be_cmd_req_set_profile_config {
  	struct be_cmd_req_hdr hdr;
  	u32 rsvd;
@@@ -1804,6 -1783,12 +1796,12 @@@ struct be_cmd_enable_disable_vf 
  	u8 rsvd[3];
  };
  
+ struct be_cmd_req_intr_set {
+ 	struct be_cmd_req_hdr hdr;
+ 	u8 intr_enabled;
+ 	u8 rsvd[3];
+ };
+ 
  static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
  {
  	return flags & adapter->cmd_privileges ? true : false;
@@@ -1847,8 -1832,7 +1845,7 @@@ extern int be_cmd_mccq_create(struct be
  			struct be_queue_info *mccq,
  			struct be_queue_info *cq);
  extern int be_cmd_txq_create(struct be_adapter *adapter,
- 			struct be_queue_info *txq,
- 			struct be_queue_info *cq);
+ 			struct be_tx_obj *txo);
  extern int be_cmd_rxq_create(struct be_adapter *adapter,
  			struct be_queue_info *rxq, u16 cq_id,
  			u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
@@@ -1875,11 -1859,11 +1872,11 @@@ extern int be_cmd_set_flow_control(stru
  			u32 tx_fc, u32 rx_fc);
  extern int be_cmd_get_flow_control(struct be_adapter *adapter,
  			u32 *tx_fc, u32 *rx_fc);
- extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
- 			u32 *port_num, u32 *function_mode, u32 *function_caps);
+ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+ 			u32 *function_mode, u32 *function_caps, u16 *asic_rev);
  extern int be_cmd_reset_function(struct be_adapter *adapter);
  extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- 			u16 table_size);
+ 			     u32 rss_hash_opts, u16 table_size);
  extern int be_process_mcc(struct be_adapter *adapter);
  extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
  			u8 port_num, u8 beacon, u8 status, u8 state);
@@@ -1944,10 -1928,11 +1941,11 @@@ extern int lancer_test_and_set_rdy_stat
  extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
  extern int be_cmd_get_func_config(struct be_adapter *adapter);
  extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- 				     u8 domain);
+ 				     u16 *txq_count, u8 domain);
  
  extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
  				     u8 domain);
  extern int be_cmd_get_if_id(struct be_adapter *adapter,
  			    struct be_vf_cfg *vf_cfg, int vf_num);
  extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+ extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --combined drivers/net/ethernet/emulex/benet/be_ethtool.c
index 1b7233c,ec3050b..5733cde
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * Copyright (C) 2005 - 2011 Emulex
+  * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -54,7 -54,7 +54,7 @@@ static const struct be_ethtool_stat et_
  	/* Received packets dropped when they don't pass the unicast or
  	 * multicast address filtering.
  	 */
- 	{DRVSTAT_INFO(rx_address_mismatch_drops)},
+ 	{DRVSTAT_INFO(rx_address_filtered)},
  	/* Received packets dropped when IP packet length field is less than
  	 * the IP header length field.
  	 */
@@@ -680,8 -680,7 +680,8 @@@ be_get_wol(struct net_device *netdev, s
  
  	if (be_is_wol_supported(adapter)) {
  		wol->supported |= WAKE_MAGIC;
 -		wol->wolopts |= WAKE_MAGIC;
 +		if (adapter->wol)
 +			wol->wolopts |= WAKE_MAGIC;
  	} else
  		wol->wolopts = 0;
  	memset(&wol->sopass, 0, sizeof(wol->sopass));
@@@ -720,10 -719,8 +720,8 @@@ be_test_ddr_dma(struct be_adapter *adap
  	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
  	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
  					   &ddrdma_cmd.dma, GFP_KERNEL);
- 	if (!ddrdma_cmd.va) {
- 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ 	if (!ddrdma_cmd.va)
  		return -ENOMEM;
- 	}
  
  	for (i = 0; i < 2; i++) {
  		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@@ -758,6 -755,12 +756,12 @@@ be_self_test(struct net_device *netdev
  	int status;
  	u8 link_status = 0;
  
+ 	if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+ 		dev_err(&adapter->pdev->dev, "Self test not supported\n");
+ 		test->flags |= ETH_TEST_FL_FAILED;
+ 		return;
+ 	}
+ 
  	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
  
  	if (test->flags & ETH_TEST_FL_OFFLINE) {
@@@ -846,11 -849,8 +850,8 @@@ be_read_eeprom(struct net_device *netde
  	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
  					   &eeprom_cmd.dma, GFP_KERNEL);
  
- 	if (!eeprom_cmd.va) {
- 		dev_err(&adapter->pdev->dev,
- 			"Memory allocation failure. Could not read eeprom\n");
+ 	if (!eeprom_cmd.va)
  		return -ENOMEM;
- 	}
  
  	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
  
@@@ -940,6 -940,159 +941,159 @@@ static void be_set_msg_level(struct net
  	return;
  }
  
+ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+ {
+ 	u64 data = 0;
+ 
+ 	switch (flow_type) {
+ 	case TCP_V4_FLOW:
+ 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ 			data |= RXH_IP_DST | RXH_IP_SRC;
+ 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ 		break;
+ 	case UDP_V4_FLOW:
+ 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ 			data |= RXH_IP_DST | RXH_IP_SRC;
+ 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ 		break;
+ 	case TCP_V6_FLOW:
+ 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ 			data |= RXH_IP_DST | RXH_IP_SRC;
+ 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ 		break;
+ 	case UDP_V6_FLOW:
+ 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ 			data |= RXH_IP_DST | RXH_IP_SRC;
+ 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ 		break;
+ 	}
+ 
+ 	return data;
+ }
+ 
+ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ 		      u32 *rule_locs)
+ {
+ 	struct be_adapter *adapter = netdev_priv(netdev);
+ 
+ 	if (!be_multi_rxq(adapter)) {
+ 		dev_info(&adapter->pdev->dev,
+ 			 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	switch (cmd->cmd) {
+ 	case ETHTOOL_GRXFH:
+ 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+ 		break;
+ 	case ETHTOOL_GRXRINGS:
+ 		cmd->data = adapter->num_rx_qs - 1;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int be_set_rss_hash_opts(struct be_adapter *adapter,
+ 				struct ethtool_rxnfc *cmd)
+ {
+ 	struct be_rx_obj *rxo;
+ 	int status = 0, i, j;
+ 	u8 rsstable[128];
+ 	u32 rss_flags = adapter->rss_flags;
+ 
+ 	if (cmd->data != L3_RSS_FLAGS &&
+ 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ 		return -EINVAL;
+ 
+ 	switch (cmd->flow_type) {
+ 	case TCP_V4_FLOW:
+ 		if (cmd->data == L3_RSS_FLAGS)
+ 			rss_flags &= ~RSS_ENABLE_TCP_IPV4;
+ 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ 			rss_flags |= RSS_ENABLE_IPV4 |
+ 					RSS_ENABLE_TCP_IPV4;
+ 		break;
+ 	case TCP_V6_FLOW:
+ 		if (cmd->data == L3_RSS_FLAGS)
+ 			rss_flags &= ~RSS_ENABLE_TCP_IPV6;
+ 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ 			rss_flags |= RSS_ENABLE_IPV6 |
+ 					RSS_ENABLE_TCP_IPV6;
+ 		break;
+ 	case UDP_V4_FLOW:
+ 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ 		    BEx_chip(adapter))
+ 			return -EINVAL;
+ 
+ 		if (cmd->data == L3_RSS_FLAGS)
+ 			rss_flags &= ~RSS_ENABLE_UDP_IPV4;
+ 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ 			rss_flags |= RSS_ENABLE_IPV4 |
+ 					RSS_ENABLE_UDP_IPV4;
+ 		break;
+ 	case UDP_V6_FLOW:
+ 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ 		    BEx_chip(adapter))
+ 			return -EINVAL;
+ 
+ 		if (cmd->data == L3_RSS_FLAGS)
+ 			rss_flags &= ~RSS_ENABLE_UDP_IPV6;
+ 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ 			rss_flags |= RSS_ENABLE_IPV6 |
+ 					RSS_ENABLE_UDP_IPV6;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (rss_flags == adapter->rss_flags)
+ 		return status;
+ 
+ 	if (be_multi_rxq(adapter)) {
+ 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ 			for_all_rss_queues(adapter, rxo, i) {
+ 				if ((j + i) >= 128)
+ 					break;
+ 				rsstable[j + i] = rxo->rss_id;
+ 			}
+ 		}
+ 	}
+ 	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+ 	if (!status)
+ 		adapter->rss_flags = rss_flags;
+ 
+ 	return status;
+ }
+ 
+ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+ {
+ 	struct be_adapter *adapter = netdev_priv(netdev);
+ 	int status = 0;
+ 
+ 	if (!be_multi_rxq(adapter)) {
+ 		dev_err(&adapter->pdev->dev,
+ 			"ethtool::set_rxnfc: RX flow hashing is disabled\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	switch (cmd->cmd) {
+ 	case ETHTOOL_SRXFH:
+ 		status = be_set_rss_hash_opts(adapter, cmd);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+ 	return status;
+ }
+ 
  const struct ethtool_ops be_ethtool_ops = {
  	.get_settings = be_get_settings,
  	.get_drvinfo = be_get_drvinfo,
@@@ -963,4 -1116,6 +1117,6 @@@
  	.get_regs = be_get_regs,
  	.flash_device = be_do_flash,
  	.self_test = be_self_test,
+ 	.get_rxnfc = be_get_rxnfc,
+ 	.set_rxnfc = be_set_rxnfc,
  };
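
For reference, the get_rxnfc/set_rxnfc hooks added above are reached from user space through the standard SIOCETHTOOL ioctl with ETHTOOL_GRXFH/ETHTOOL_SRXFH. The sketch below is an illustration only, not part of this patch (the interface name "eth0" and the minimal error handling are assumptions). It requests L3 + L4 hashing for UDPv4 flows, which be_set_rss_hash_opts() above maps to RSS_ENABLE_IPV4 | RSS_ENABLE_UDP_IPV4 on non-BEx chips:

/* hedged user-space sketch: request L3+L4 RSS hashing for UDPv4 flows */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXFH;		/* dispatched to be_set_rxnfc() */
	nfc.flow_type = UDP_V4_FLOW;
	nfc.data = RXH_IP_SRC | RXH_IP_DST |	/* L3_RSS_FLAGS */
		   RXH_L4_B_0_1 | RXH_L4_B_2_3;	/* L4_RSS_FLAGS */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_SRXFH");

	close(fd);
	return 0;
}
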
diff --combined drivers/net/ethernet/emulex/benet/be_main.c
index 1232e91,1c734915..4babc8a
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * Copyright (C) 2005 - 2011 Emulex
+  * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -146,20 -146,16 +146,16 @@@ static int be_queue_alloc(struct be_ada
  	q->entry_size = entry_size;
  	mem->size = len * entry_size;
  	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- 				     GFP_KERNEL);
+ 				     GFP_KERNEL | __GFP_ZERO);
  	if (!mem->va)
  		return -ENOMEM;
  	return 0;
  }
  
- static void be_intr_set(struct be_adapter *adapter, bool enable)
+ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
  {
  	u32 reg, enabled;
  
- 	if (adapter->eeh_error)
- 		return;
- 
  	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
  				&reg);
  	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@@ -175,6 -171,22 +171,22 @@@
  			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
  }
  
+ static void be_intr_set(struct be_adapter *adapter, bool enable)
+ {
+ 	int status = 0;
+ 
+ 	/* On lancer, interrupts can't be controlled via this register */
+ 	if (lancer_chip(adapter))
+ 		return;
+ 
+ 	if (adapter->eeh_error)
+ 		return;
+ 
+ 	status = be_cmd_intr_set(adapter, enable);
+ 	if (status)
+ 		be_reg_intr_set(adapter, enable);
+ }
+ 
  static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
  {
  	u32 val = 0;
@@@ -185,14 -197,15 +197,15 @@@
  	iowrite32(val, adapter->db + DB_RQ_OFFSET);
  }
  
- static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
+ 			  u16 posted)
  {
  	u32 val = 0;
- 	val |= qid & DB_TXULP_RING_ID_MASK;
+ 	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
  	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
  
  	wmb();
- 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
+ 	iowrite32(val, adapter->db + txo->db_offset);
  }
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
@@@ -340,9 -353,9 +353,9 @@@ static void populate_be_v0_stats(struc
  	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
  	drvs->rx_dropped_header_too_small =
  		port_stats->rx_dropped_header_too_small;
- 	drvs->rx_address_mismatch_drops =
- 					port_stats->rx_address_mismatch_drops +
- 					port_stats->rx_vlan_mismatch_drops;
+ 	drvs->rx_address_filtered =
+ 					port_stats->rx_address_filtered +
+ 					port_stats->rx_vlan_filtered;
  	drvs->rx_alignment_symbol_errors =
  		port_stats->rx_alignment_symbol_errors;
  
@@@ -391,7 -404,7 +404,7 @@@ static void populate_be_v1_stats(struc
  		port_stats->rx_dropped_header_too_small;
  	drvs->rx_input_fifo_overflow_drop =
  		port_stats->rx_input_fifo_overflow_drop;
- 	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
+ 	drvs->rx_address_filtered = port_stats->rx_address_filtered;
  	drvs->rx_alignment_symbol_errors =
  		port_stats->rx_alignment_symbol_errors;
  	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@@ -432,9 -445,9 +445,9 @@@ static void populate_lancer_stats(struc
  	drvs->rx_dropped_header_too_small =
  				pport_stats->rx_dropped_header_too_small;
  	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- 	drvs->rx_address_mismatch_drops =
- 					pport_stats->rx_address_mismatch_drops +
- 					pport_stats->rx_vlan_mismatch_drops;
+ 	drvs->rx_address_filtered =
+ 					pport_stats->rx_address_filtered +
+ 					pport_stats->rx_vlan_filtered;
  	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
  	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
  	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
@@@ -626,8 -639,13 +639,8 @@@ static inline u16 be_get_tx_vlan_tag(st
  	return vlan_tag;
  }
  
 -static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
 -{
 -	return vlan_tx_tag_present(skb) || adapter->pvid;
 -}
 -
  static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 -		struct sk_buff *skb, u32 wrb_cnt, u32 len)
 +		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
  {
  	u16 vlan_tag;
  
@@@ -654,9 -672,8 +667,9 @@@
  		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
  	}
  
 +	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
 +	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
  	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
 -	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
  	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
  	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
  }
@@@ -679,8 -696,7 +692,8 @@@ static void unmap_tx_frag(struct devic
  }
  
  static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 -		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
 +		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
 +		bool skip_hw_vlan)
  {
  	dma_addr_t busaddr;
  	int i, copied = 0;
@@@ -729,7 -745,7 +742,7 @@@
  		queue_head_inc(txq);
  	}
  
 -	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
 +	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
  	be_dws_cpu_to_le(hdr, sizeof(*hdr));
  
  	return copied;
@@@ -746,8 -762,7 +759,8 @@@ dma_err
  }
  
  static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 -					     struct sk_buff *skb)
 +					     struct sk_buff *skb,
 +					     bool *skip_hw_vlan)
  {
  	u16 vlan_tag = 0;
  
@@@ -757,72 -772,14 +770,72 @@@
  
  	if (vlan_tx_tag_present(skb)) {
  		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- 		skb = __vlan_put_tag(skb, vlan_tag);
+ 		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
  		if (skb)
  			skb->vlan_tci = 0;
  	}
  
 +	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
 +		if (!vlan_tag)
 +			vlan_tag = adapter->pvid;
 +		if (skip_hw_vlan)
 +			*skip_hw_vlan = true;
 +	}
 +
 +	if (vlan_tag) {
- 		skb = __vlan_put_tag(skb, vlan_tag);
++		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 +		if (unlikely(!skb))
 +			return skb;
 +
 +		skb->vlan_tci = 0;
 +	}
 +
 +	/* Insert the outer VLAN, if any */
 +	if (adapter->qnq_vid) {
 +		vlan_tag = adapter->qnq_vid;
- 		skb = __vlan_put_tag(skb, vlan_tag);
++		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 +		if (unlikely(!skb))
 +			return skb;
 +		if (skip_hw_vlan)
 +			*skip_hw_vlan = true;
 +	}
 +
  	return skb;
  }
  
 +static bool be_ipv6_exthdr_check(struct sk_buff *skb)
 +{
 +	struct ethhdr *eh = (struct ethhdr *)skb->data;
 +	u16 offset = ETH_HLEN;
 +
 +	if (eh->h_proto == htons(ETH_P_IPV6)) {
 +		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
 +
 +		offset += sizeof(struct ipv6hdr);
 +		if (ip6h->nexthdr != NEXTHDR_TCP &&
 +		    ip6h->nexthdr != NEXTHDR_UDP) {
 +			struct ipv6_opt_hdr *ehdr =
 +				(struct ipv6_opt_hdr *) (skb->data + offset);
 +
 +			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
 +			if (ehdr->hdrlen == 0xff)
 +				return true;
 +		}
 +	}
 +	return false;
 +}
 +
 +static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
 +{
 +	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 +}
 +
 +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
 +{
 +	return BE3_chip(adapter) &&
 +		be_ipv6_exthdr_check(skb);
 +}
 +
  static netdev_tx_t be_xmit(struct sk_buff *skb,
  			struct net_device *netdev)
  {
@@@ -833,64 -790,33 +846,64 @@@
  	u32 wrb_cnt = 0, copied = 0;
  	u32 start = txq->head, eth_hdr_len;
  	bool dummy_wrb, stopped = false;
 +	bool skip_hw_vlan = false;
 +	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
  
  	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
  		VLAN_ETH_HLEN : ETH_HLEN;
  
 -	/* HW has a bug which considers padding bytes as legal
 -	 * and modifies the IPv4 hdr's 'tot_len' field
 +	/* For padded packets, BE HW modifies tot_len field in IP header
 +	 * incorrectly when VLAN tag is inserted by HW.
  	 */
 -	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
 -			is_ipv4_pkt(skb)) {
 +	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
  		ip = (struct iphdr *)ip_hdr(skb);
  		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
  	}
  
 +	/* If vlan tag is already inlined in the packet, skip HW VLAN
 +	 * tagging in UMC mode
 +	 */
 +	if ((adapter->function_mode & UMC_ENABLED) &&
 +	    veh->h_vlan_proto == htons(ETH_P_8021Q))
 +			skip_hw_vlan = true;
 +
  	/* HW has a bug wherein it will calculate CSUM for VLAN
  	 * pkts even though it is disabled.
  	 * Manually insert VLAN in pkt.
  	 */
  	if (skb->ip_summed != CHECKSUM_PARTIAL &&
 -			be_vlan_tag_chk(adapter, skb)) {
 -		skb = be_insert_vlan_in_pkt(adapter, skb);
 +			vlan_tx_tag_present(skb)) {
 +		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
 +		if (unlikely(!skb))
 +			goto tx_drop;
 +	}
 +
 +	/* HW may lock up when VLAN HW tagging is requested on
 +	 * certain ipv6 packets. Drop such pkts if the HW workaround to
 +	 * skip HW tagging is not enabled by FW.
 +	 */
 +	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
 +		     (adapter->pvid || adapter->qnq_vid) &&
 +		     !qnq_async_evt_rcvd(adapter)))
 +		goto tx_drop;
 +
 +	/* Manual VLAN tag insertion to prevent:
 +	 * ASIC lockup when the ASIC inserts VLAN tag into
 +	 * certain ipv6 packets. Insert VLAN tags in driver,
 +	 * and set event, completion, vlan bits accordingly
 +	 * in the Tx WRB.
 +	 */
 +	if (be_ipv6_tx_stall_chk(adapter, skb) &&
 +	    be_vlan_tag_tx_chk(adapter, skb)) {
 +		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
  		if (unlikely(!skb))
  			goto tx_drop;
  	}
  
  	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
  
 -	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
 +	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
 +			      skip_hw_vlan);
  	if (copied) {
  		int gso_segs = skb_shinfo(skb)->gso_segs;
  
@@@ -909,7 -835,7 +922,7 @@@
  			stopped = true;
  		}
  
- 		be_txq_notify(adapter, txq->id, wrb_cnt);
+ 		be_txq_notify(adapter, txo, wrb_cnt);
  
  		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
  	} else {
@@@ -978,7 -904,7 +991,7 @@@ set_vlan_promisc
  	return status;
  }
  
- static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
+ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	int status = 0;
@@@ -1004,7 -930,7 +1017,7 @@@ ret
  	return status;
  }
  
- static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	int status = 0;
@@@ -1459,7 -1385,7 +1472,7 @@@ static void be_rx_compl_process(struct 
  
  
  	if (rxcp->vlanf)
- 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+ 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
  	netif_receive_skb(skb);
  }
@@@ -1515,7 -1441,7 +1528,7 @@@ void be_rx_compl_process_gro(struct be_
  		skb->rxhash = rxcp->rss_hash;
  
  	if (rxcp->vlanf)
- 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+ 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
  	napi_gro_frags(napi);
  }
@@@ -2045,7 -1971,7 +2058,7 @@@ static int be_tx_qs_create(struct be_ad
  		if (status)
  			return status;
  
- 		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ 		status = be_cmd_txq_create(adapter, txo);
  		if (status)
  			return status;
  	}
@@@ -2523,9 -2449,6 +2536,6 @@@ static int be_close(struct net_device *
  
  	be_roce_dev_close(adapter);
  
- 	if (!lancer_chip(adapter))
- 		be_intr_set(adapter, false);
- 
  	for_all_evt_queues(adapter, eqo, i)
  		napi_disable(&eqo->napi);
  
@@@ -2587,9 -2510,19 +2597,19 @@@ static int be_rx_qs_create(struct be_ad
  				rsstable[j + i] = rxo->rss_id;
  			}
  		}
- 		rc = be_cmd_rss_config(adapter, rsstable, 128);
- 		if (rc)
+ 		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ 					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ 
+ 		if (!BEx_chip(adapter))
+ 			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ 						RSS_ENABLE_UDP_IPV6;
+ 
+ 		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 				       128);
+ 		if (rc) {
+ 			adapter->rss_flags = 0;
  			return rc;
+ 		}
  	}
  
  	/* First time posting */
@@@ -2613,9 -2546,6 +2633,6 @@@ static int be_open(struct net_device *n
  
  	be_irq_register(adapter);
  
- 	if (!lancer_chip(adapter))
- 		be_intr_set(adapter, true);
- 
  	for_all_rx_queues(adapter, rxo, i)
  		be_cq_notify(adapter, rxo->cq.id, true, 0);
  
@@@ -2650,10 -2580,9 +2667,9 @@@ static int be_setup_wol(struct be_adapt
  
  	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
  	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- 				    GFP_KERNEL);
+ 				    GFP_KERNEL | __GFP_ZERO);
  	if (cmd.va == NULL)
  		return -1;
- 	memset(cmd.va, 0, cmd.size);
  
  	if (enable) {
  		status = pci_write_config_dword(adapter->pdev,
@@@ -2801,7 -2730,8 +2817,8 @@@ static int be_vfs_if_create(struct be_a
  
  	for_all_vfs(adapter, vf_cfg, vf) {
  		if (!BE3_chip(adapter))
- 			be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
+ 			be_cmd_get_profile_config(adapter, &cap_flags,
+ 						  NULL, vf + 1);
  
  		/* If a FW profile exists, then cap_flags are updated */
  		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@@ -2965,11 -2895,14 +2982,14 @@@ static void be_get_resources(struct be_
  	u16 dev_num_vfs;
  	int pos, status;
  	bool profile_present = false;
+ 	u16 txq_count = 0;
  
  	if (!BEx_chip(adapter)) {
  		status = be_cmd_get_func_config(adapter);
  		if (!status)
  			profile_present = true;
+ 	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
+ 		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
  	}
  
  	if (profile_present) {
@@@ -3007,7 -2940,9 +3027,9 @@@
  			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
  
  		adapter->max_mcast_mac = BE_MAX_MC;
- 		adapter->max_tx_queues = MAX_TX_QS;
+ 		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
+ 		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
+ 					       MAX_TX_QS);
  		adapter->max_rss_queues = (adapter->be3_native) ?
  					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
  		adapter->max_event_queues = BE3_MAX_RSS_QS;
@@@ -3041,7 -2976,8 +3063,8 @@@ static int be_get_config(struct be_adap
  
  	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
  				     &adapter->function_mode,
- 				     &adapter->function_caps);
+ 				     &adapter->function_caps,
+ 				     &adapter->asic_rev);
  	if (status)
  		goto err;
  
@@@ -3302,7 -3238,7 +3325,7 @@@ static int be_flash(struct be_adapter *
  	return 0;
  }
  
- /* For BE2 and BE3 */
+ /* For BE2, BE3 and BE3-R */
  static int be_flash_BEx(struct be_adapter *adapter,
  			 const struct firmware *fw,
  			 struct be_dma_mem *flash_cmd,
@@@ -3545,11 -3481,9 +3568,9 @@@ static int lancer_fw_download(struct be
  	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
  				+ LANCER_FW_DOWNLOAD_CHUNK;
  	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- 						&flash_cmd.dma, GFP_KERNEL);
+ 					  &flash_cmd.dma, GFP_KERNEL);
  	if (!flash_cmd.va) {
  		status = -ENOMEM;
- 		dev_err(&adapter->pdev->dev,
- 			"Memory allocation failure while flashing\n");
  		goto lancer_fw_exit;
  	}
  
@@@ -3617,18 -3551,22 +3638,22 @@@ lancer_fw_exit
  
  #define UFI_TYPE2		2
  #define UFI_TYPE3		3
+ #define UFI_TYPE3R		10
  #define UFI_TYPE4		4
  static int be_get_ufi_type(struct be_adapter *adapter,
- 			   struct flash_file_hdr_g2 *fhdr)
+ 			   struct flash_file_hdr_g3 *fhdr)
  {
  	if (fhdr == NULL)
  		goto be_get_ufi_exit;
  
  	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
  		return UFI_TYPE4;
- 	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
- 		return UFI_TYPE3;
- 	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
+ 	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
+ 		if (fhdr->asic_type_rev == 0x10)
+ 			return UFI_TYPE3R;
+ 		else
+ 			return UFI_TYPE3;
+ 	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
  		return UFI_TYPE2;
  
  be_get_ufi_exit:
@@@ -3639,7 -3577,6 +3664,6 @@@
  
  static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
  {
- 	struct flash_file_hdr_g2 *fhdr;
  	struct flash_file_hdr_g3 *fhdr3;
  	struct image_hdr *img_hdr_ptr = NULL;
  	struct be_dma_mem flash_cmd;
@@@ -3651,29 -3588,41 +3675,41 @@@
  					  &flash_cmd.dma, GFP_KERNEL);
  	if (!flash_cmd.va) {
  		status = -ENOMEM;
- 		dev_err(&adapter->pdev->dev,
- 			"Memory allocation failure while flashing\n");
  		goto be_fw_exit;
  	}
  
  	p = fw->data;
- 	fhdr = (struct flash_file_hdr_g2 *)p;
+ 	fhdr3 = (struct flash_file_hdr_g3 *)p;
  
- 	ufi_type = be_get_ufi_type(adapter, fhdr);
+ 	ufi_type = be_get_ufi_type(adapter, fhdr3);
  
- 	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
  	num_imgs = le32_to_cpu(fhdr3->num_imgs);
  	for (i = 0; i < num_imgs; i++) {
  		img_hdr_ptr = (struct image_hdr *)(fw->data +
  				(sizeof(struct flash_file_hdr_g3) +
  				 i * sizeof(struct image_hdr)));
  		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
- 			if (ufi_type == UFI_TYPE4)
+ 			switch (ufi_type) {
+ 			case UFI_TYPE4:
  				status = be_flash_skyhawk(adapter, fw,
  							&flash_cmd, num_imgs);
- 			else if (ufi_type == UFI_TYPE3)
+ 				break;
+ 			case UFI_TYPE3R:
  				status = be_flash_BEx(adapter, fw, &flash_cmd,
  						      num_imgs);
+ 				break;
+ 			case UFI_TYPE3:
+ 				/* Do not flash this ufi on BE3-R cards */
+ 				if (adapter->asic_rev < 0x10)
+ 					status = be_flash_BEx(adapter, fw,
+ 							      &flash_cmd,
+ 							      num_imgs);
+ 				else {
+ 					status = -1;
+ 					dev_err(&adapter->pdev->dev,
+ 						"Can't load BE3 UFI on BE3R\n");
+ 				}
+ 			}
  		}
  	}
  
@@@ -3750,12 -3699,12 +3786,12 @@@ static void be_netdev_init(struct net_d
  
  	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
- 		NETIF_F_HW_VLAN_TX;
+ 		NETIF_F_HW_VLAN_CTAG_TX;
  	if (be_multi_rxq(adapter))
  		netdev->hw_features |= NETIF_F_RXHASH;
  
  	netdev->features |= netdev->hw_features |
- 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
  
  	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@@ -3879,12 -3828,13 +3915,13 @@@ static int be_ctrl_init(struct be_adapt
  
  	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
  	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- 					&rx_filter->dma, GFP_KERNEL);
+ 					   &rx_filter->dma,
+ 					   GFP_KERNEL | __GFP_ZERO);
  	if (rx_filter->va == NULL) {
  		status = -ENOMEM;
  		goto free_mbox;
  	}
- 	memset(rx_filter->va, 0, rx_filter->size);
+ 
  	mutex_init(&adapter->mbox_lock);
  	spin_lock_init(&adapter->mcc_lock);
  	spin_lock_init(&adapter->mcc_cq_lock);
@@@ -3926,10 -3876,9 +3963,9 @@@ static int be_stats_init(struct be_adap
  		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
  
  	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- 				     GFP_KERNEL);
+ 				     GFP_KERNEL | __GFP_ZERO);
  	if (cmd->va == NULL)
  		return -1;
- 	memset(cmd->va, 0, cmd->size);
  	return 0;
  }
  
@@@ -3941,6 -3890,7 +3977,7 @@@ static void be_remove(struct pci_dev *p
  		return;
  
  	be_roce_dev_remove(adapter);
+ 	be_intr_set(adapter, false);
  
  	cancel_delayed_work_sync(&adapter->func_recovery_work);
  
@@@ -4195,6 -4145,11 +4232,11 @@@ static int be_probe(struct pci_dev *pde
  
  	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
  	if (!status) {
+ 		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ 		if (status < 0) {
+ 			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
+ 			goto free_netdev;
+ 		}
  		netdev->features |= NETIF_F_HIGHDMA;
  	} else {
  		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
@@@ -4219,22 -4174,22 +4261,22 @@@
  			goto ctrl_clean;
  	}
  
- 	/* tell fw we're ready to fire cmds */
- 	status = be_cmd_fw_init(adapter);
- 	if (status)
- 		goto ctrl_clean;
- 
  	if (be_reset_required(adapter)) {
  		status = be_cmd_reset_function(adapter);
  		if (status)
  			goto ctrl_clean;
+ 
+ 		/* Wait for interrupts to quiesce after an FLR */
+ 		msleep(100);
  	}
  
- 	/* The INTR bit may be set in the card when probed by a kdump kernel
- 	 * after a crash.
- 	 */
- 	if (!lancer_chip(adapter))
- 		be_intr_set(adapter, false);
+ 	/* Allow interrupts for other ULPs running on NIC function */
+ 	be_intr_set(adapter, true);
+ 
+ 	/* tell fw we're ready to fire cmds */
+ 	status = be_cmd_fw_init(adapter);
+ 	if (status)
+ 		goto ctrl_clean;
  
  	status = be_stats_init(adapter);
  	if (status)
@@@ -4445,12 -4400,12 +4487,12 @@@ static void be_eeh_resume(struct pci_de
  
  	pci_save_state(pdev);
  
- 	/* tell fw we're ready to fire cmds */
- 	status = be_cmd_fw_init(adapter);
+ 	status = be_cmd_reset_function(adapter);
  	if (status)
  		goto err;
  
- 	status = be_cmd_reset_function(adapter);
+ 	/* tell fw we're ready to fire cmds */
+ 	status = be_cmd_fw_init(adapter);
  	if (status)
  		goto err;
  
diff --combined drivers/net/ethernet/freescale/gianfar_ptp.c
index a3f8a25,fe8e9e5..576e4b8
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@@ -17,6 -17,9 +17,9 @@@
   *  along with this program; if not, write to the Free Software
   *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   */
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
  #include <linux/device.h>
  #include <linux/hrtimer.h>
  #include <linux/init.h>
@@@ -127,6 -130,7 +130,6 @@@ struct gianfar_ptp_registers 
  
  #define DRIVER		"gianfar_ptp"
  #define DEFAULT_CKSEL	1
 -#define N_ALARM		1 /* first alarm is used internally to reset fipers */
  #define N_EXT_TS	2
  #define REG_SIZE	sizeof(struct gianfar_ptp_registers)
  
@@@ -409,7 -413,7 +412,7 @@@ static struct ptp_clock_info ptp_gianfa
  	.owner		= THIS_MODULE,
  	.name		= "gianfar clock",
  	.max_adj	= 512000,
 -	.n_alarm	= N_ALARM,
 +	.n_alarm	= 0,
  	.n_ext_ts	= N_EXT_TS,
  	.n_per_out	= 0,
  	.pps		= 1,
diff --combined drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ab98b77,8412570..ef46b58
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@@ -705,7 -705,19 +705,7 @@@ static struct pcmcia_driver fmvj18x_cs_
  	.suspend	= fmvj18x_suspend,
  	.resume		= fmvj18x_resume,
  };
 -
 -static int __init init_fmvj18x_cs(void)
 -{
 -	return pcmcia_register_driver(&fmvj18x_cs_driver);
 -}
 -
 -static void __exit exit_fmvj18x_cs(void)
 -{
 -	pcmcia_unregister_driver(&fmvj18x_cs_driver);
 -}
 -
 -module_init(init_fmvj18x_cs);
 -module_exit(exit_fmvj18x_cs);
 +module_pcmcia_driver(fmvj18x_cs_driver);
  
  /*====================================================================*/
  
@@@ -991,8 -1003,6 +991,6 @@@ static void fjn_rx(struct net_device *d
  	    }
  	    skb = netdev_alloc_skb(dev, pkt_len + 2);
  	    if (skb == NULL) {
- 		netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
- 			      pkt_len);
  		outb(F_SKP_PKT, ioaddr + RX_SKIP);
  		dev->stats.rx_dropped++;
  		break;
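
The fmvj18x_cs change above folds the open-coded init/exit pair into module_pcmcia_driver(). For context, that helper (from include/pcmcia/ds.h) is expected to be built on the generic module_driver() wrapper, so its effect is roughly the boilerplate it replaces; the sketch below illustrates that equivalence and is not the literal preprocessor output:

/* rough equivalent of module_pcmcia_driver(fmvj18x_cs_driver) */
static int __init fmvj18x_cs_driver_init(void)
{
	return pcmcia_register_driver(&fmvj18x_cs_driver);
}
module_init(fmvj18x_cs_driver_init);

static void __exit fmvj18x_cs_driver_exit(void)
{
	pcmcia_unregister_driver(&fmvj18x_cs_driver);
}
module_exit(fmvj18x_cs_driver_exit);
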
diff --combined drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 2047684,bcf4d11..c9e6b62
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@@ -889,7 -889,7 +889,7 @@@ static int mlx4_en_flow_replace(struct 
  		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
  		.exclusive = 0,
  		.allow_loopback = 1,
 -		.promisc_mode = MLX4_FS_PROMISC_NONE,
 +		.promisc_mode = MLX4_FS_REGULAR,
  	};
  
  	rule.port = priv->port;
@@@ -1147,6 -1147,35 +1147,35 @@@ out
  	return err;
  }
  
+ static int mlx4_en_get_ts_info(struct net_device *dev,
+ 			       struct ethtool_ts_info *info)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 	int ret;
+ 
+ 	ret = ethtool_op_get_ts_info(dev, info);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
+ 		info->so_timestamping |=
+ 			SOF_TIMESTAMPING_TX_HARDWARE |
+ 			SOF_TIMESTAMPING_RX_HARDWARE |
+ 			SOF_TIMESTAMPING_RAW_HARDWARE;
+ 
+ 		info->tx_types =
+ 			(1 << HWTSTAMP_TX_OFF) |
+ 			(1 << HWTSTAMP_TX_ON);
+ 
+ 		info->rx_filters =
+ 			(1 << HWTSTAMP_FILTER_NONE) |
+ 			(1 << HWTSTAMP_FILTER_ALL);
+ 	}
+ 
+ 	return ret;
+ }
+ 
  const struct ethtool_ops mlx4_en_ethtool_ops = {
  	.get_drvinfo = mlx4_en_get_drvinfo,
  	.get_settings = mlx4_en_get_settings,
@@@ -1173,6 -1202,7 +1202,7 @@@
  	.set_rxfh_indir = mlx4_en_set_rxfh_indir,
  	.get_channels = mlx4_en_get_channels,
  	.set_channels = mlx4_en_set_channels,
+ 	.get_ts_info = mlx4_en_get_ts_info,
  };
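
The new get_ts_info hook above answers the ETHTOOL_GET_TS_INFO query, through which applications discover the timestamping modes that the SIOCSHWTSTAMP handler added in en_netdev.c (below) will accept. A minimal, hypothetical user-space probe (the interface name "eth0" is an assumption, not part of this patch) might look like:

/* hedged user-space sketch: query hardware timestamping capabilities */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GET_TS_INFO;	/* served by mlx4_en_get_ts_info() */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("so_timestamping 0x%x tx_types 0x%x rx_filters 0x%x\n",
		       info.so_timestamping, info.tx_types, info.rx_filters);

	close(fd);
	return 0;
}
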
  
  
diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0860130,a69a908..b35f947
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@@ -127,7 -127,7 +127,7 @@@ static void mlx4_en_filter_work(struct 
  		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
  		.exclusive = 1,
  		.allow_loopback = 1,
 -		.promisc_mode = MLX4_FS_PROMISC_NONE,
 +		.promisc_mode = MLX4_FS_REGULAR,
  		.port = priv->port,
  		.priority = MLX4_DOMAIN_RFS,
  	};
@@@ -356,7 -356,8 +356,8 @@@ static void mlx4_en_filter_rfs_expire(s
  }
  #endif
  
- static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+ 				   __be16 proto, u16 vid)
  {
  	struct mlx4_en_priv *priv = netdev_priv(dev);
  	struct mlx4_en_dev *mdev = priv->mdev;
@@@ -381,7 -382,8 +382,8 @@@
  	return 0;
  }
  
- static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+ 				    __be16 proto, u16 vid)
  {
  	struct mlx4_en_priv *priv = netdev_priv(dev);
  	struct mlx4_en_dev *mdev = priv->mdev;
@@@ -446,7 -448,7 +448,7 @@@ static int mlx4_en_uc_steer_add(struct 
  			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
  			.exclusive = 0,
  			.allow_loopback = 1,
 -			.promisc_mode = MLX4_FS_PROMISC_NONE,
 +			.promisc_mode = MLX4_FS_REGULAR,
  			.priority = MLX4_DOMAIN_NIC,
  		};
  
@@@ -793,7 -795,7 +795,7 @@@ static void mlx4_en_set_promisc_mode(st
  			err = mlx4_flow_steer_promisc_add(mdev->dev,
  							  priv->port,
  							  priv->base_qpn,
 -							  MLX4_FS_PROMISC_UPLINK);
 +							  MLX4_FS_ALL_DEFAULT);
  			if (err)
  				en_err(priv, "Failed enabling promiscuous mode\n");
  			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@@ -856,7 -858,7 +858,7 @@@ static void mlx4_en_clear_promisc_mode(
  	case MLX4_STEERING_MODE_DEVICE_MANAGED:
  		err = mlx4_flow_steer_promisc_remove(mdev->dev,
  						     priv->port,
 -						     MLX4_FS_PROMISC_UPLINK);
 +						     MLX4_FS_ALL_DEFAULT);
  		if (err)
  			en_err(priv, "Failed disabling promiscuous mode\n");
  		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@@ -917,7 -919,7 +919,7 @@@ static void mlx4_en_do_multicast(struc
  				err = mlx4_flow_steer_promisc_add(mdev->dev,
  								  priv->port,
  								  priv->base_qpn,
 -								  MLX4_FS_PROMISC_ALL_MULTI);
 +								  MLX4_FS_MC_DEFAULT);
  				break;
  
  			case MLX4_STEERING_MODE_B0:
@@@ -940,7 -942,7 +942,7 @@@
  			case MLX4_STEERING_MODE_DEVICE_MANAGED:
  				err = mlx4_flow_steer_promisc_remove(mdev->dev,
  								     priv->port,
 -								     MLX4_FS_PROMISC_ALL_MULTI);
 +								     MLX4_FS_MC_DEFAULT);
  				break;
  
  			case MLX4_STEERING_MODE_B0:
@@@ -1359,6 -1361,27 +1361,27 @@@ static void mlx4_en_do_get_stats(struc
  	mutex_unlock(&mdev->state_lock);
  }
  
+ /* mlx4_en_service_task - Run service task for tasks that need to be done
+  * periodically
+  */
+ static void mlx4_en_service_task(struct work_struct *work)
+ {
+ 	struct delayed_work *delay = to_delayed_work(work);
+ 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ 						 service_task);
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 
+ 	mutex_lock(&mdev->state_lock);
+ 	if (mdev->device_up) {
+ 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ 			mlx4_en_ptp_overflow_check(mdev);
+ 
+ 		queue_delayed_work(mdev->workqueue, &priv->service_task,
+ 				   SERVICE_TASK_DELAY);
+ 	}
+ 	mutex_unlock(&mdev->state_lock);
+ }
+ 
  static void mlx4_en_linkstate(struct work_struct *work)
  {
  	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
@@@ -1598,10 -1621,10 +1621,10 @@@ void mlx4_en_stop_port(struct net_devic
  				 MLX4_EN_FLAG_MC_PROMISC);
  		mlx4_flow_steer_promisc_remove(mdev->dev,
  					       priv->port,
 -					       MLX4_FS_PROMISC_UPLINK);
 +					       MLX4_FS_ALL_DEFAULT);
  		mlx4_flow_steer_promisc_remove(mdev->dev,
  					       priv->port,
 -					       MLX4_FS_PROMISC_ALL_MULTI);
 +					       MLX4_FS_MC_DEFAULT);
  	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
  		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
  
@@@ -1863,6 -1886,7 +1886,7 @@@ void mlx4_en_destroy_netdev(struct net_
  		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
  
  	cancel_delayed_work(&priv->stats_task);
+ 	cancel_delayed_work(&priv->service_task);
  	/* flush any pending task for this netdev */
  	flush_workqueue(mdev->workqueue);
  
@@@ -1914,6 -1938,75 +1938,75 @@@ static int mlx4_en_change_mtu(struct ne
  	return 0;
  }
  
+ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 	struct hwtstamp_config config;
+ 
+ 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ 		return -EFAULT;
+ 
+ 	/* reserved for future extensions */
+ 	if (config.flags)
+ 		return -EINVAL;
+ 
+ 	/* device doesn't support time stamping */
+ 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
+ 		return -EINVAL;
+ 
+ 	/* TX HW timestamp */
+ 	switch (config.tx_type) {
+ 	case HWTSTAMP_TX_OFF:
+ 	case HWTSTAMP_TX_ON:
+ 		break;
+ 	default:
+ 		return -ERANGE;
+ 	}
+ 
+ 	/* RX HW timestamp */
+ 	switch (config.rx_filter) {
+ 	case HWTSTAMP_FILTER_NONE:
+ 		break;
+ 	case HWTSTAMP_FILTER_ALL:
+ 	case HWTSTAMP_FILTER_SOME:
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ 		config.rx_filter = HWTSTAMP_FILTER_ALL;
+ 		break;
+ 	default:
+ 		return -ERANGE;
+ 	}
+ 
+ 	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+ 		config.tx_type = HWTSTAMP_TX_OFF;
+ 		config.rx_filter = HWTSTAMP_FILTER_NONE;
+ 	}
+ 
+ 	return copy_to_user(ifr->ifr_data, &config,
+ 			    sizeof(config)) ? -EFAULT : 0;
+ }
+ 
+ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
+ 	switch (cmd) {
+ 	case SIOCSHWTSTAMP:
+ 		return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+ }
+ 
  static int mlx4_en_set_features(struct net_device *netdev,
  		netdev_features_t features)
  {
@@@ -1931,77 -2024,40 +2024,40 @@@
  
  }
  
- static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- 			   struct net_device *dev,
- 			   const unsigned char *addr, u16 flags)
+ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
  {
- 	struct mlx4_en_priv *priv = netdev_priv(dev);
- 	struct mlx4_dev *mdev = priv->mdev->dev;
- 	int err;
- 
- 	if (!mlx4_is_mfunc(mdev))
- 		return -EOPNOTSUPP;
+ 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = en_priv->mdev;
+ 	u64 mac_u64 = mlx4_en_mac_to_u64(mac);
  
- 	/* Hardware does not support aging addresses, allow only
- 	 * permanent addresses if ndm_state is given
- 	 */
- 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- 		en_info(priv, "Add FDB only supports static addresses\n");
+ 	if (!is_valid_ether_addr(mac))
  		return -EINVAL;
- 	}
  
- 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- 		err = dev_uc_add_excl(dev, addr);
- 	else if (is_multicast_ether_addr(addr))
- 		err = dev_mc_add_excl(dev, addr);
- 	else
- 		err = -EINVAL;
- 
- 	/* Only return duplicate errors if NLM_F_EXCL is set */
- 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
- 		err = 0;
- 
- 	return err;
+ 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
  }
  
- static int mlx4_en_fdb_del(struct ndmsg *ndm,
- 			   struct nlattr *tb[],
- 			   struct net_device *dev,
- 			   const unsigned char *addr)
+ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
  {
- 	struct mlx4_en_priv *priv = netdev_priv(dev);
- 	struct mlx4_dev *mdev = priv->mdev->dev;
- 	int err;
- 
- 	if (!mlx4_is_mfunc(mdev))
- 		return -EOPNOTSUPP;
+ 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = en_priv->mdev;
  
- 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- 		en_info(priv, "Del FDB only supports static addresses\n");
- 		return -EINVAL;
- 	}
+ 	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ }
  
- 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- 		err = dev_uc_del(dev, addr);
- 	else if (is_multicast_ether_addr(addr))
- 		err = dev_mc_del(dev, addr);
- 	else
- 		err = -EINVAL;
+ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ {
+ 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = en_priv->mdev;
  
- 	return err;
+ 	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
  }
  
- static int mlx4_en_fdb_dump(struct sk_buff *skb,
- 			    struct netlink_callback *cb,
- 			    struct net_device *dev, int idx)
+ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
  {
- 	struct mlx4_en_priv *priv = netdev_priv(dev);
- 	struct mlx4_dev *mdev = priv->mdev->dev;
- 
- 	if (mlx4_is_mfunc(mdev))
- 		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+ 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ 	struct mlx4_en_dev *mdev = en_priv->mdev;
  
- 	return idx;
+ 	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
  }
  
  static const struct net_device_ops mlx4_netdev_ops = {
@@@ -2014,6 -2070,7 +2070,7 @@@
  	.ndo_set_mac_address	= mlx4_en_set_mac,
  	.ndo_validate_addr	= eth_validate_addr,
  	.ndo_change_mtu		= mlx4_en_change_mtu,
+ 	.ndo_do_ioctl		= mlx4_en_ioctl,
  	.ndo_tx_timeout		= mlx4_en_tx_timeout,
  	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
  	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
@@@ -2025,9 -2082,33 +2082,33 @@@
  #ifdef CONFIG_RFS_ACCEL
  	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
  #endif
- 	.ndo_fdb_add		= mlx4_en_fdb_add,
- 	.ndo_fdb_del		= mlx4_en_fdb_del,
- 	.ndo_fdb_dump		= mlx4_en_fdb_dump,
+ };
+ 
+ static const struct net_device_ops mlx4_netdev_ops_master = {
+ 	.ndo_open		= mlx4_en_open,
+ 	.ndo_stop		= mlx4_en_close,
+ 	.ndo_start_xmit		= mlx4_en_xmit,
+ 	.ndo_select_queue	= mlx4_en_select_queue,
+ 	.ndo_get_stats		= mlx4_en_get_stats,
+ 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
+ 	.ndo_set_mac_address	= mlx4_en_set_mac,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_change_mtu		= mlx4_en_change_mtu,
+ 	.ndo_tx_timeout		= mlx4_en_tx_timeout,
+ 	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
+ 	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
+ 	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
+ 	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
+ 	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
+ 	.ndo_get_vf_config	= mlx4_en_get_vf_config,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ 	.ndo_poll_controller	= mlx4_en_netpoll,
+ #endif
+ 	.ndo_set_features	= mlx4_en_set_features,
+ 	.ndo_setup_tc		= mlx4_en_setup_tc,
+ #ifdef CONFIG_RFS_ACCEL
+ 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
+ #endif
  };
  
  int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@@ -2088,9 -2169,16 +2169,16 @@@
  	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
  	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
  	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ 	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
  #ifdef CONFIG_MLX4_EN_DCB
- 	if (!mlx4_is_slave(priv->mdev->dev))
- 		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+ 	if (!mlx4_is_slave(priv->mdev->dev)) {
+ 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+ 			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+ 		} else {
+ 			en_info(priv, "enabling only PFC DCB ops\n");
+ 			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
+ 		}
+ 	}
  #endif
  
  	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
@@@ -2122,6 -2210,11 +2210,11 @@@
  	spin_lock_init(&priv->filters_lock);
  #endif
  
+ 	/* Initialize time stamping config */
+ 	priv->hwtstamp_config.flags = 0;
+ 	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ 	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ 
  	/* Allocate page for receive rings */
  	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
  				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
@@@ -2134,7 -2227,10 +2227,10 @@@
  	/*
  	 * Initialize netdev entry points
  	 */
- 	dev->netdev_ops = &mlx4_netdev_ops;
+ 	if (mlx4_is_master(priv->mdev->dev))
+ 		dev->netdev_ops = &mlx4_netdev_ops_master;
+ 	else
+ 		dev->netdev_ops = &mlx4_netdev_ops;
  	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
  	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
  	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@@ -2152,8 -2248,8 +2248,8 @@@
  
  	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
  	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
- 			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- 			NETIF_F_HW_VLAN_FILTER;
+ 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ 			NETIF_F_HW_VLAN_CTAG_FILTER;
  	dev->hw_features |= NETIF_F_LOOPBACK;
  
  	if (mdev->dev->caps.steering_mode ==
@@@ -2199,6 -2295,11 +2295,11 @@@
  	}
  	mlx4_en_set_default_moderation(priv);
  	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ 
+ 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ 		queue_delayed_work(mdev->workqueue, &priv->service_task,
+ 				   SERVICE_TASK_DELAY);
+ 
  	return 0;
  
  out:
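
[Aside, not part of the patch above: a minimal user-space sketch of how the new SIOCSHWTSTAMP handler added to mlx4_en_netdev.c could be exercised. The interface name "eth0" is an assumption; any mlx4_en port whose HCA reports MLX4_DEV_CAP_FLAG2_TS would do.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.flags = 0,                        /* reserved, must be 0 */
		.tx_type = HWTSTAMP_TX_ON,         /* timestamp all TX packets */
		.rx_filter = HWTSTAMP_FILTER_ALL,  /* driver coerces PTP filters to ALL */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed port name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}

[Reading the structure back after the ioctl shows what the driver actually programmed; per the hunk above, mlx4_en reports HWTSTAMP_FILTER_ALL for any accepted RX filter and falls back to "off" if mlx4_en_timestamp_config() fails.]
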
diff --combined drivers/net/ethernet/mellanox/mlx4/mcg.c
index 00b4e7b,ffc78d2..f3e804f
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@@ -645,37 -645,25 +645,37 @@@ static int find_entry(struct mlx4_dev *
  	return err;
  }
  
 +static const u8 __promisc_mode[] = {
 +	[MLX4_FS_REGULAR]   = 0x0,
 +	[MLX4_FS_ALL_DEFAULT] = 0x1,
 +	[MLX4_FS_MC_DEFAULT] = 0x3,
 +	[MLX4_FS_UC_SNIFFER] = 0x4,
 +	[MLX4_FS_MC_SNIFFER] = 0x5,
 +};
 +
 +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
 +				    enum mlx4_net_trans_promisc_mode flow_type)
 +{
 +	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
 +		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
 +		return -EINVAL;
 +	}
 +	return __promisc_mode[flow_type];
 +}
 +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
 +
  static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
  				  struct mlx4_net_trans_rule_hw_ctrl *hw)
  {
 -	static const u8 __promisc_mode[] = {
 -		[MLX4_FS_PROMISC_NONE]   = 0x0,
 -		[MLX4_FS_PROMISC_UPLINK] = 0x1,
 -		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
 -		[MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
 -	};
 -
 -	u32 dw = 0;
 -
 -	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
 -	dw |= ctrl->exclusive ? (1 << 2) : 0;
 -	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
 -	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
 -	dw |= ctrl->priority << 16;
 -
 -	hw->ctrl = cpu_to_be32(dw);
 +	u8 flags = 0;
 +
 +	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
 +	flags |= ctrl->exclusive ? (1 << 2) : 0;
 +	flags |= ctrl->allow_loopback ? (1 << 3) : 0;
 +
 +	hw->flags = flags;
 +	hw->type = __promisc_mode[ctrl->promisc_mode];
 +	hw->prio = cpu_to_be16(ctrl->priority);
  	hw->port = ctrl->port;
  	hw->qpn = cpu_to_be32(ctrl->qpn);
  }
@@@ -689,51 -677,29 +689,51 @@@ const u16 __sw_id_hw[] = 
  	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
  };
  
 +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 +				  enum mlx4_net_trans_rule_id id)
 +{
 +	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
 +		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 +		return -EINVAL;
 +	}
 +	return __sw_id_hw[id];
 +}
 +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
 +
 +static const int __rule_hw_sz[] = {
 +	[MLX4_NET_TRANS_RULE_ID_ETH] =
 +		sizeof(struct mlx4_net_trans_rule_hw_eth),
 +	[MLX4_NET_TRANS_RULE_ID_IB] =
 +		sizeof(struct mlx4_net_trans_rule_hw_ib),
 +	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
 +	[MLX4_NET_TRANS_RULE_ID_IPV4] =
 +		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
 +	[MLX4_NET_TRANS_RULE_ID_TCP] =
 +		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 +	[MLX4_NET_TRANS_RULE_ID_UDP] =
 +		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
 +};
 +
 +int mlx4_hw_rule_sz(struct mlx4_dev *dev,
 +	       enum mlx4_net_trans_rule_id id)
 +{
 +	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
 +		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 +		return -EINVAL;
 +	}
 +
 +	return __rule_hw_sz[id];
 +}
 +EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
 +
  static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
  			    struct _rule_hw *rule_hw)
  {
 -	static const size_t __rule_hw_sz[] = {
 -		[MLX4_NET_TRANS_RULE_ID_ETH] =
 -			sizeof(struct mlx4_net_trans_rule_hw_eth),
 -		[MLX4_NET_TRANS_RULE_ID_IB] =
 -			sizeof(struct mlx4_net_trans_rule_hw_ib),
 -		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
 -		[MLX4_NET_TRANS_RULE_ID_IPV4] =
 -			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
 -		[MLX4_NET_TRANS_RULE_ID_TCP] =
 -			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 -		[MLX4_NET_TRANS_RULE_ID_UDP] =
 -			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
 -	};
 -	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
 -		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
 +	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
  		return -EINVAL;
 -	}
 -	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
 +	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
  	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
 -	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
 +	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
  
  	switch (spec->id) {
  	case MLX4_NET_TRANS_RULE_ID_ETH:
@@@ -747,12 -713,12 +747,12 @@@
  			rule_hw->eth.ether_type_enable = 1;
  			rule_hw->eth.ether_type = spec->eth.ether_type;
  		}
 -		rule_hw->eth.vlan_id = spec->eth.vlan_id;
 -		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
 +		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
 +		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
  		break;
  
  	case MLX4_NET_TRANS_RULE_ID_IB:
 -		rule_hw->ib.qpn = spec->ib.r_qpn;
 +		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
  		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
  		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
  		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@@ -1159,35 -1125,18 +1159,18 @@@ static int mlx4_QP_ATTACH(struct mlx4_d
  	return err;
  }
  
- int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- 			  u8 port, int block_mcast_loopback,
- 			  enum mlx4_protocol prot, u64 *reg_id)
+ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ 			      u8 gid[16], u8 port,
+ 			      int block_mcast_loopback,
+ 			      enum mlx4_protocol prot, u64 *reg_id)
  {
- 
- 	switch (dev->caps.steering_mode) {
- 	case MLX4_STEERING_MODE_A0:
- 		if (prot == MLX4_PROT_ETH)
- 			return 0;
- 
- 	case MLX4_STEERING_MODE_B0:
- 		if (prot == MLX4_PROT_ETH)
- 			gid[7] |= (MLX4_MC_STEER << 1);
- 
- 		if (mlx4_is_mfunc(dev))
- 			return mlx4_QP_ATTACH(dev, qp, gid, 1,
- 					      block_mcast_loopback, prot);
- 		return mlx4_qp_attach_common(dev, qp, gid,
- 					     block_mcast_loopback, prot,
- 					     MLX4_MC_STEER);
- 
- 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
  		struct mlx4_spec_list spec = { {NULL} };
  		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
  
  		struct mlx4_net_trans_rule rule = {
  			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
  			.exclusive = 0,
 -			.promisc_mode = MLX4_FS_PROMISC_NONE,
 +			.promisc_mode = MLX4_FS_REGULAR,
  			.priority = MLX4_DOMAIN_NIC,
  		};
  
@@@ -1214,8 -1163,32 +1197,32 @@@
  		list_add_tail(&spec.list, &rule.list);
  
  		return mlx4_flow_attach(dev, &rule, reg_id);
- 	}
+ }
+ 
+ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ 			  u8 port, int block_mcast_loopback,
+ 			  enum mlx4_protocol prot, u64 *reg_id)
+ {
+ 	switch (dev->caps.steering_mode) {
+ 	case MLX4_STEERING_MODE_A0:
+ 		if (prot == MLX4_PROT_ETH)
+ 			return 0;
+ 
+ 	case MLX4_STEERING_MODE_B0:
+ 		if (prot == MLX4_PROT_ETH)
+ 			gid[7] |= (MLX4_MC_STEER << 1);
+ 
+ 		if (mlx4_is_mfunc(dev))
+ 			return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ 					      block_mcast_loopback, prot);
+ 		return mlx4_qp_attach_common(dev, qp, gid,
+ 					     block_mcast_loopback, prot,
+ 					     MLX4_MC_STEER);
  
+ 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ 		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
+ 						 block_mcast_loopback,
+ 						 prot, reg_id);
  	default:
  		return -EINVAL;
  	}
@@@ -1256,10 -1229,11 +1263,10 @@@ int mlx4_flow_steer_promisc_add(struct 
  	u64 *regid_p;
  
  	switch (mode) {
 -	case MLX4_FS_PROMISC_UPLINK:
 -	case MLX4_FS_PROMISC_FUNCTION_PORT:
 +	case MLX4_FS_ALL_DEFAULT:
  		regid_p = &dev->regid_promisc_array[port];
  		break;
 -	case MLX4_FS_PROMISC_ALL_MULTI:
 +	case MLX4_FS_MC_DEFAULT:
  		regid_p = &dev->regid_allmulti_array[port];
  		break;
  	default:
@@@ -1286,10 -1260,11 +1293,10 @@@ int mlx4_flow_steer_promisc_remove(stru
  	u64 *regid_p;
  
  	switch (mode) {
 -	case MLX4_FS_PROMISC_UPLINK:
 -	case MLX4_FS_PROMISC_FUNCTION_PORT:
 +	case MLX4_FS_ALL_DEFAULT:
  		regid_p = &dev->regid_promisc_array[port];
  		break;
 -	case MLX4_FS_PROMISC_ALL_MULTI:
 +	case MLX4_FS_MC_DEFAULT:
  		regid_p = &dev->regid_allmulti_array[port];
  		break;
  	default:
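
[Aside, not from the patch: the mcg.c hunks above hoist the per-call static tables into file-scope arrays and export bounds-checked accessors (mlx4_hw_rule_sz(), mlx4_map_sw_to_hw_steering_mode()). A standalone sketch of that designated-initializer lookup pattern, with illustrative names only:]

#include <stdio.h>

enum rule_id { RULE_ETH, RULE_IPV4, RULE_TCP, RULE_NUM };

/* Designated initializers keep the table in sync with the enum,
 * in the same way __rule_hw_sz[] and __promisc_mode[] do above. */
static const int rule_size[] = {
	[RULE_ETH]  = 24,
	[RULE_IPV4] = 16,
	[RULE_TCP]  = 12,
};

static int rule_sz(int id)
{
	if (id < 0 || id >= RULE_NUM)
		return -1;	/* reject out-of-range ids before indexing */
	return rule_size[id];
}

int main(void)
{
	printf("%d %d\n", rule_sz(RULE_IPV4), rule_sz(42));
	return 0;
}
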
diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d5fdb19,eac3dae..df15bb6
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@@ -87,7 -87,8 +87,8 @@@ enum 
  	MLX4_HCR_SIZE		= 0x0001c,
  	MLX4_CLR_INT_SIZE	= 0x00008,
  	MLX4_SLAVE_COMM_BASE	= 0x0,
- 	MLX4_COMM_PAGESIZE	= 0x1000
+ 	MLX4_COMM_PAGESIZE	= 0x1000,
+ 	MLX4_CLOCK_SIZE		= 0x00008
  };
  
  enum {
@@@ -403,6 -404,7 +404,7 @@@ struct mlx4_fw 
  	u64			clr_int_base;
  	u64			catas_offset;
  	u64			comm_base;
+ 	u64			clock_offset;
  	struct mlx4_icm	       *fw_icm;
  	struct mlx4_icm	       *aux_icm;
  	u32			catas_size;
@@@ -410,6 -412,7 +412,7 @@@
  	u8			clr_int_bar;
  	u8			catas_bar;
  	u8			comm_bar;
+ 	u8			clock_bar;
  };
  
  struct mlx4_comm {
@@@ -470,6 -473,30 +473,30 @@@ struct mlx4_slave_state 
  	enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
  };
  
+ #define MLX4_VGT 4095
+ #define NO_INDX  (-1)
+ 
+ struct mlx4_vport_state {
+ 	u64 mac;
+ 	u16 default_vlan;
+ 	u8  default_qos;
+ 	u32 tx_rate;
+ 	bool spoofchk;
+ };
+ 
+ struct mlx4_vf_admin_state {
+ 	struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
+ };
+ 
+ struct mlx4_vport_oper_state {
+ 	struct mlx4_vport_state state;
+ 	int mac_idx;
+ 	int vlan_idx;
+ };
+ struct mlx4_vf_oper_state {
+ 	struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
+ };
+ 
  struct slave_list {
  	struct mutex mutex;
  	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
@@@ -500,6 -527,8 +527,8 @@@ struct mlx4_master_qp0_state 
  
  struct mlx4_mfunc_master_ctx {
  	struct mlx4_slave_state *slave_state;
+ 	struct mlx4_vf_admin_state *vf_admin;
+ 	struct mlx4_vf_oper_state *vf_oper;
  	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
  	int			init_port_ref[MLX4_MAX_PORTS + 1];
  	u16			max_mtu[MLX4_MAX_PORTS + 1];
@@@ -701,6 -730,85 +730,6 @@@ struct mlx4_steer 
  	struct list_head steer_entries[MLX4_NUM_STEERS];
  };
  
 -struct mlx4_net_trans_rule_hw_ctrl {
 -	__be32 ctrl;
 -	u8 rsvd1;
 -	u8 funcid;
 -	u8 vep;
 -	u8 port;
 -	__be32 qpn;
 -	__be32 rsvd2;
 -};
 -
 -struct mlx4_net_trans_rule_hw_ib {
 -	u8 size;
 -	u8 rsvd1;
 -	__be16 id;
 -	u32 rsvd2;
 -	__be32 qpn;
 -	__be32 qpn_mask;
 -	u8 dst_gid[16];
 -	u8 dst_gid_msk[16];
 -} __packed;
 -
 -struct mlx4_net_trans_rule_hw_eth {
 -	u8	size;
 -	u8	rsvd;
 -	__be16	id;
 -	u8	rsvd1[6];
 -	u8	dst_mac[6];
 -	u16	rsvd2;
 -	u8	dst_mac_msk[6];
 -	u16	rsvd3;
 -	u8	src_mac[6];
 -	u16	rsvd4;
 -	u8	src_mac_msk[6];
 -	u8      rsvd5;
 -	u8      ether_type_enable;
 -	__be16  ether_type;
 -	__be16  vlan_id_msk;
 -	__be16  vlan_id;
 -} __packed;
 -
 -struct mlx4_net_trans_rule_hw_tcp_udp {
 -	u8	size;
 -	u8	rsvd;
 -	__be16	id;
 -	__be16	rsvd1[3];
 -	__be16	dst_port;
 -	__be16	rsvd2;
 -	__be16	dst_port_msk;
 -	__be16	rsvd3;
 -	__be16	src_port;
 -	__be16	rsvd4;
 -	__be16	src_port_msk;
 -} __packed;
 -
 -struct mlx4_net_trans_rule_hw_ipv4 {
 -	u8	size;
 -	u8	rsvd;
 -	__be16	id;
 -	__be32	rsvd1;
 -	__be32	dst_ip;
 -	__be32	dst_ip_msk;
 -	__be32	src_ip;
 -	__be32	src_ip_msk;
 -} __packed;
 -
 -struct _rule_hw {
 -	union {
 -		struct {
 -			u8 size;
 -			u8 rsvd;
 -			__be16 id;
 -		};
 -		struct mlx4_net_trans_rule_hw_eth eth;
 -		struct mlx4_net_trans_rule_hw_ib ib;
 -		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
 -		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
 -	};
 -};
 -
  enum {
  	MLX4_PCI_DEV_IS_VF		= 1 << 0,
  	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
@@@ -747,6 -855,7 +776,7 @@@ struct mlx4_priv 
  	struct list_head	bf_list;
  	struct mutex		bf_mutex;
  	struct io_mapping	*bf_mapping;
+ 	void __iomem            *clock_mapping;
  	int			reserved_mtts;
  	int			fs_hash_mode;
  	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
@@@ -1048,6 -1157,8 +1078,8 @@@ int mlx4_change_port_types(struct mlx4_
  
  void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
  void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
  
  int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
  /* resource tracker functions*/
@@@ -1111,6 -1222,10 +1143,10 @@@ int mlx4_qp_detach_common(struct mlx4_d
  int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  			  int block_mcast_loopback, enum mlx4_protocol prot,
  			  enum mlx4_steer_type steer);
+ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ 			      u8 gid[16], u8 port,
+ 			      int block_mcast_loopback,
+ 			      enum mlx4_protocol prot, u64 *reg_id);
  int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  				struct mlx4_vhcr *vhcr,
  				struct mlx4_cmd_mailbox *inbox,
diff --combined drivers/net/ethernet/realtek/r8169.c
index 15ba8c4,c6dac38..79c520b
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -47,7 -47,9 +47,9 @@@
  #define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
  #define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
  #define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
- #define FIRMWARE_8168G_1	"rtl_nic/rtl8168g-1.fw"
+ #define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
+ #define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
+ #define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
  
  #ifdef RTL8169_DEBUG
  #define assert(expr) \
@@@ -140,6 -142,8 +142,8 @@@ enum mac_version 
  	RTL_GIGA_MAC_VER_39,
  	RTL_GIGA_MAC_VER_40,
  	RTL_GIGA_MAC_VER_41,
+ 	RTL_GIGA_MAC_VER_42,
+ 	RTL_GIGA_MAC_VER_43,
  	RTL_GIGA_MAC_NONE   = 0xff,
  };
  
@@@ -262,10 -266,16 +266,16 @@@ static const struct 
  		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_1,
  							JUMBO_1K, true),
  	[RTL_GIGA_MAC_VER_40] =
- 		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_1,
+ 		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_2,
  							JUMBO_9K, false),
  	[RTL_GIGA_MAC_VER_41] =
  		_R("RTL8168g/8111g",	RTL_TD_1, NULL, JUMBO_9K, false),
+ 	[RTL_GIGA_MAC_VER_42] =
+ 		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_3,
+ 							JUMBO_9K, false),
+ 	[RTL_GIGA_MAC_VER_43] =
+ 		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_2,
+ 							JUMBO_1K, true),
  };
  #undef _R
  
@@@ -329,6 -339,7 +339,7 @@@ enum rtl_registers 
  #define	RXCFG_FIFO_SHIFT		13
  					/* No threshold before first PCI xfer */
  #define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
+ #define	RX_EARLY_OFF			(1 << 11)
  #define	RXCFG_DMA_SHIFT			8
  					/* Unlimited maximum PCI burst. */
  #define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
@@@ -513,6 -524,7 +524,7 @@@ enum rtl_register_content 
  	PMEnable	= (1 << 0),	/* Power Management Enable */
  
  	/* Config2 register p. 25 */
+ 	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
  	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
  	PCI_Clock_66MHz = 0x01,
  	PCI_Clock_33MHz = 0x00,
@@@ -533,6 -545,7 +545,7 @@@
  	Spi_en		= (1 << 3),
  	LanWake		= (1 << 1),	/* LanWake enable/disable */
  	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
+ 	ASPM_en		= (1 << 0),	/* ASPM enable */
  
  	/* TBICSR p.28 */
  	TBIReset	= 0x80000000,
@@@ -814,7 -827,9 +827,9 @@@ MODULE_FIRMWARE(FIRMWARE_8168F_2)
  MODULE_FIRMWARE(FIRMWARE_8402_1);
  MODULE_FIRMWARE(FIRMWARE_8411_1);
  MODULE_FIRMWARE(FIRMWARE_8106E_1);
- MODULE_FIRMWARE(FIRMWARE_8168G_1);
+ MODULE_FIRMWARE(FIRMWARE_8106E_2);
+ MODULE_FIRMWARE(FIRMWARE_8168G_2);
+ MODULE_FIRMWARE(FIRMWARE_8168G_3);
  
  static void rtl_lock_work(struct rtl8169_private *tp)
  {
@@@ -1024,14 -1039,6 +1039,6 @@@ static u16 r8168_phy_ocp_read(struct rt
  		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
  }
  
- static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
- {
- 	int val;
- 
- 	val = r8168_phy_ocp_read(tp, reg);
- 	r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
- }
- 
  static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
  {
  	void __iomem *ioaddr = tp->mmio_addr;
@@@ -1077,6 -1084,21 +1084,21 @@@ static int r8168g_mdio_read(struct rtl8
  	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
  }
  
+ static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
+ {
+ 	if (reg == 0x1f) {
+ 		tp->ocp_base = value << 4;
+ 		return;
+ 	}
+ 
+ 	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
+ }
+ 
+ static int mac_mcu_read(struct rtl8169_private *tp, int reg)
+ {
+ 	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
+ }
+ 
  DECLARE_RTL_COND(rtl_phyar_cond)
  {
  	void __iomem *ioaddr = tp->mmio_addr;
@@@ -1771,16 -1793,17 +1793,17 @@@ static void __rtl8169_set_features(stru
  	netdev_features_t changed = features ^ dev->features;
  	void __iomem *ioaddr = tp->mmio_addr;
  
- 	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
+ 	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
+ 			 NETIF_F_HW_VLAN_CTAG_RX)))
  		return;
  
- 	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
+ 	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
  		if (features & NETIF_F_RXCSUM)
  			tp->cp_cmd |= RxChkSum;
  		else
  			tp->cp_cmd &= ~RxChkSum;
  
- 		if (dev->features & NETIF_F_HW_VLAN_RX)
+ 		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
  			tp->cp_cmd |= RxVlan;
  		else
  			tp->cp_cmd &= ~RxVlan;
@@@ -1820,7 -1843,7 +1843,7 @@@ static void rtl8169_rx_vlan_tag(struct 
  	u32 opts2 = le32_to_cpu(desc->opts2);
  
  	if (opts2 & RxVlanTag)
- 		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+ 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
  }
  
  static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@@ -2028,6 -2051,7 +2051,7 @@@ static void rtl8169_get_mac_version(str
  		int mac_version;
  	} mac_info[] = {
  		/* 8168G family. */
+ 		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
  		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
  		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },
  
@@@ -2116,6 -2140,10 +2140,10 @@@
  		netif_notice(tp, probe, dev,
  			     "unknown MAC, using family default\n");
  		tp->mac_version = default_version;
+ 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
+ 		tp->mac_version = tp->mii.supports_gmii ?
+ 				  RTL_GIGA_MAC_VER_42 :
+ 				  RTL_GIGA_MAC_VER_43;
  	}
  }
  
@@@ -2142,9 -2170,7 +2170,7 @@@ static void rtl_writephy_batch(struct r
  #define PHY_DATA_OR		0x10000000
  #define PHY_DATA_AND		0x20000000
  #define PHY_BJMPN		0x30000000
- #define PHY_READ_EFUSE		0x40000000
- #define PHY_READ_MAC_BYTE	0x50000000
- #define PHY_WRITE_MAC_BYTE	0x60000000
+ #define PHY_MDIO_CHG		0x40000000
  #define PHY_CLEAR_READCOUNT	0x70000000
  #define PHY_WRITE		0x80000000
  #define PHY_READCOUNT_EQ_SKIP	0x90000000
@@@ -2153,7 -2179,6 +2179,6 @@@
  #define PHY_WRITE_PREVIOUS	0xc0000000
  #define PHY_SKIPN		0xd0000000
  #define PHY_DELAY_MS		0xe0000000
- #define PHY_WRITE_ERI_WORD	0xf0000000
  
  struct fw_info {
  	u32	magic;
@@@ -2230,7 -2255,7 +2255,7 @@@ static bool rtl_fw_data_ok(struct rtl81
  		case PHY_READ:
  		case PHY_DATA_OR:
  		case PHY_DATA_AND:
- 		case PHY_READ_EFUSE:
+ 		case PHY_MDIO_CHG:
  		case PHY_CLEAR_READCOUNT:
  		case PHY_WRITE:
  		case PHY_WRITE_PREVIOUS:
@@@ -2261,9 -2286,6 +2286,6 @@@
  			}
  			break;
  
- 		case PHY_READ_MAC_BYTE:
- 		case PHY_WRITE_MAC_BYTE:
- 		case PHY_WRITE_ERI_WORD:
  		default:
  			netif_err(tp, ifup, tp->dev,
  				  "Invalid action 0x%08x\n", action);
@@@ -2294,10 -2316,13 +2316,13 @@@ out
  static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
  {
  	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ 	struct mdio_ops org, *ops = &tp->mdio_ops;
  	u32 predata, count;
  	size_t index;
  
  	predata = count = 0;
+ 	org.write = ops->write;
+ 	org.read = ops->read;
  
  	for (index = 0; index < pa->size; ) {
  		u32 action = le32_to_cpu(pa->code[index]);
@@@ -2324,8 -2349,15 +2349,15 @@@
  		case PHY_BJMPN:
  			index -= regno;
  			break;
- 		case PHY_READ_EFUSE:
- 			predata = rtl8168d_efuse_read(tp, regno);
+ 		case PHY_MDIO_CHG:
+ 			if (data == 0) {
+ 				ops->write = org.write;
+ 				ops->read = org.read;
+ 			} else if (data == 1) {
+ 				ops->write = mac_mcu_write;
+ 				ops->read = mac_mcu_read;
+ 			}
+ 
  			index++;
  			break;
  		case PHY_CLEAR_READCOUNT:
@@@ -2361,13 -2393,13 +2393,13 @@@
  			index++;
  			break;
  
- 		case PHY_READ_MAC_BYTE:
- 		case PHY_WRITE_MAC_BYTE:
- 		case PHY_WRITE_ERI_WORD:
  		default:
  			BUG();
  		}
  	}
+ 
+ 	ops->write = org.write;
+ 	ops->read = org.read;
  }
  
  static void rtl_release_firmware(struct rtl8169_private *tp)
@@@ -3368,51 -3400,68 +3400,68 @@@ static void rtl8411_hw_phy_config(struc
  
  static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
  {
- 	static const u16 mac_ocp_patch[] = {
- 		0xe008, 0xe01b, 0xe01d, 0xe01f,
- 		0xe021, 0xe023, 0xe025, 0xe027,
- 		0x49d2, 0xf10d, 0x766c, 0x49e2,
- 		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
- 
- 		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
- 		0xc707, 0x8ee1, 0x9d6c, 0xc603,
- 		0xbe00, 0xb416, 0x0076, 0xe86c,
- 		0xc602, 0xbe00, 0x0000, 0xc602,
- 
- 		0xbe00, 0x0000, 0xc602, 0xbe00,
- 		0x0000, 0xc602, 0xbe00, 0x0000,
- 		0xc602, 0xbe00, 0x0000, 0xc602,
- 		0xbe00, 0x0000, 0xc602, 0xbe00,
- 
- 		0x0000, 0x0000, 0x0000, 0x0000
- 	};
- 	u32 i;
+ 	rtl_apply_firmware(tp);
  
- 	/* Patch code for GPHY reset */
- 	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
- 		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
- 	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
- 	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+ 	rtl_writephy(tp, 0x1f, 0x0a46);
+ 	if (rtl_readphy(tp, 0x10) & 0x0100) {
+ 		rtl_writephy(tp, 0x1f, 0x0bcc);
+ 		rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
+ 	} else {
+ 		rtl_writephy(tp, 0x1f, 0x0bcc);
+ 		rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
+ 	}
  
- 	rtl_apply_firmware(tp);
+ 	rtl_writephy(tp, 0x1f, 0x0a46);
+ 	if (rtl_readphy(tp, 0x13) & 0x0100) {
+ 		rtl_writephy(tp, 0x1f, 0x0c41);
+ 		rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
+ 	} else {
+ 		rtl_writephy(tp, 0x1f, 0x0c41);
+ 		rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
+ 	}
  
- 	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
- 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
- 	else
- 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+ 	/* Enable PHY auto speed down */
+ 	rtl_writephy(tp, 0x1f, 0x0a44);
+ 	rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
+ 
+ 	rtl_writephy(tp, 0x1f, 0x0bcc);
+ 	rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0a44);
+ 	rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0a43);
+ 	rtl_writephy(tp, 0x13, 0x8084);
+ 	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
+ 	rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
+ 
+ 	/* EEE auto-fallback function */
+ 	rtl_writephy(tp, 0x1f, 0x0a4b);
+ 	rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
+ 
+ 	/* Enable UC LPF tune function */
+ 	rtl_writephy(tp, 0x1f, 0x0a43);
+ 	rtl_writephy(tp, 0x13, 0x8012);
+ 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
  
- 	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
- 		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
- 	else
- 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+ 	rtl_writephy(tp, 0x1f, 0x0c42);
+ 	rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
  
- 	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
- 	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+ 	/* Improve SWR Efficiency */
+ 	rtl_writephy(tp, 0x1f, 0x0bcd);
+ 	rtl_writephy(tp, 0x14, 0x5065);
+ 	rtl_writephy(tp, 0x14, 0xd065);
+ 	rtl_writephy(tp, 0x1f, 0x0bc8);
+ 	rtl_writephy(tp, 0x11, 0x5655);
+ 	rtl_writephy(tp, 0x1f, 0x0bcd);
+ 	rtl_writephy(tp, 0x14, 0x1065);
+ 	rtl_writephy(tp, 0x14, 0x9065);
+ 	rtl_writephy(tp, 0x14, 0x1065);
  
- 	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
- 	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
  
- 	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	rtl_apply_firmware(tp);
  }
  
  static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@@ -3600,6 -3649,10 +3649,10 @@@ static void rtl_hw_phy_config(struct ne
  	case RTL_GIGA_MAC_VER_40:
  		rtl8168g_1_hw_phy_config(tp);
  		break;
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
+ 		rtl8168g_2_hw_phy_config(tp);
+ 		break;
  
  	case RTL_GIGA_MAC_VER_41:
  	default:
@@@ -3808,6 -3861,8 +3861,8 @@@ static void rtl_init_mdio_ops(struct rt
  		break;
  	case RTL_GIGA_MAC_VER_40:
  	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
  		ops->write	= r8168g_mdio_write;
  		ops->read	= r8168g_mdio_read;
  		break;
@@@ -3859,6 -3914,8 +3914,8 @@@ static void rtl_wol_suspend_quirk(struc
  	case RTL_GIGA_MAC_VER_39:
  	case RTL_GIGA_MAC_VER_40:
  	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
  		RTL_W32(RxConfig, RTL_R32(RxConfig) |
  			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
  		break;
@@@ -3966,6 -4023,8 +4023,8 @@@ static void r8168_phy_power_down(struc
  	switch (tp->mac_version) {
  	case RTL_GIGA_MAC_VER_32:
  	case RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_40:
+ 	case RTL_GIGA_MAC_VER_41:
  		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
  		break;
  
@@@ -4027,6 -4086,11 +4086,11 @@@ static void r8168_pll_power_down(struc
  	case RTL_GIGA_MAC_VER_33:
  		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
  		break;
+ 	case RTL_GIGA_MAC_VER_40:
+ 	case RTL_GIGA_MAC_VER_41:
+ 		rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
+ 			     0xfc000000, ERIAR_EXGMAC);
+ 		break;
  	}
  }
  
@@@ -4044,6 -4108,11 +4108,11 @@@ static void r8168_pll_power_up(struct r
  	case RTL_GIGA_MAC_VER_33:
  		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
  		break;
+ 	case RTL_GIGA_MAC_VER_40:
+ 	case RTL_GIGA_MAC_VER_41:
+ 		rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
+ 			     0x00000000, ERIAR_EXGMAC);
+ 		break;
  	}
  
  	r8168_phy_power_up(tp);
@@@ -4080,6 -4149,7 +4149,7 @@@ static void rtl_init_pll_power_ops(stru
  	case RTL_GIGA_MAC_VER_30:
  	case RTL_GIGA_MAC_VER_37:
  	case RTL_GIGA_MAC_VER_39:
+ 	case RTL_GIGA_MAC_VER_43:
  		ops->down	= r810x_pll_power_down;
  		ops->up		= r810x_pll_power_up;
  		break;
@@@ -4107,6 -4177,7 +4177,7 @@@
  	case RTL_GIGA_MAC_VER_38:
  	case RTL_GIGA_MAC_VER_40:
  	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
  		ops->down	= r8168_pll_power_down;
  		ops->up		= r8168_pll_power_up;
  		break;
@@@ -4149,6 -4220,12 +4220,12 @@@ static void rtl_init_rxcfg(struct rtl81
  	case RTL_GIGA_MAC_VER_34:
  		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
  		break;
+ 	case RTL_GIGA_MAC_VER_40:
+ 	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
+ 		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ 		break;
  	default:
  		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
  		break;
@@@ -4305,6 -4382,8 +4382,8 @@@ static void rtl_init_jumbo_ops(struct r
  	 */
  	case RTL_GIGA_MAC_VER_40:
  	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
  	default:
  		ops->disable	= NULL;
  		ops->enable	= NULL;
@@@ -4412,6 -4491,8 +4491,8 @@@ static void rtl8169_hw_reset(struct rtl
  	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
  	           tp->mac_version == RTL_GIGA_MAC_VER_40 ||
  	           tp->mac_version == RTL_GIGA_MAC_VER_41 ||
+ 	           tp->mac_version == RTL_GIGA_MAC_VER_42 ||
+ 	           tp->mac_version == RTL_GIGA_MAC_VER_43 ||
  	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
  		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
  		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@@@ -5127,6 -5208,8 +5208,8 @@@ static void rtl_hw_start_8168g_1(struc
  	void __iomem *ioaddr = tp->mmio_addr;
  	struct pci_dev *pdev = tp->pci_dev;
  
+ 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ 
  	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
  	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
  	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@@ -5138,6 -5221,7 +5221,7 @@@
  
  	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
  	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ 	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
  
  	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
  	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
@@@ -5149,7 -5233,26 +5233,26 @@@
  	/* Adjust EEE LED frequency */
  	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
  
- 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
+ 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
+ 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+ }
+ 
+ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
+ {
+ 	void __iomem *ioaddr = tp->mmio_addr;
+ 	static const struct ephy_info e_info_8168g_2[] = {
+ 		{ 0x00, 0x0000,	0x0008 },
+ 		{ 0x0c, 0x3df0,	0x0200 },
+ 		{ 0x19, 0xffff,	0xfc00 },
+ 		{ 0x1e, 0xffff,	0x20eb }
+ 	};
+ 
+ 	rtl_hw_start_8168g_1(tp);
+ 
+ 	/* disable aspm and clock request before access ephy */
+ 	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ 	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ 	rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
  }
  
  static void rtl_hw_start_8168(struct net_device *dev)
@@@ -5177,10 -5280,7 +5280,7 @@@
  
  	rtl_set_rx_tx_desc_registers(tp, ioaddr);
  
- 	rtl_set_rx_mode(dev);
- 
- 	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
- 		(InterFrameGap << TxInterFrameGapShift));
+ 	rtl_set_rx_tx_config_registers(tp);
  
  	RTL_R8(IntrMask);
  
@@@ -5257,6 -5357,9 +5357,9 @@@
  	case RTL_GIGA_MAC_VER_41:
  		rtl_hw_start_8168g_1(tp);
  		break;
+ 	case RTL_GIGA_MAC_VER_42:
+ 		rtl_hw_start_8168g_2(tp);
+ 		break;
  
  	default:
  		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
@@@ -5264,9 -5367,11 +5367,11 @@@
  		break;
  	}
  
+ 	RTL_W8(Cfg9346, Cfg9346_Lock);
+ 
  	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
  
- 	RTL_W8(Cfg9346, Cfg9346_Lock);
+ 	rtl_set_rx_mode(dev);
  
  	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
  }
@@@ -5424,6 -5529,17 +5529,17 @@@ static void rtl_hw_start_8101(struct ne
  
  	RTL_W8(Cfg9346, Cfg9346_Unlock);
  
+ 	RTL_W8(MaxTxPacketSize, TxPacketMax);
+ 
+ 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ 
+ 	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ 	RTL_W16(CPlusCmd, tp->cp_cmd);
+ 
+ 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ 
+ 	rtl_set_rx_tx_config_registers(tp);
+ 
  	switch (tp->mac_version) {
  	case RTL_GIGA_MAC_VER_07:
  		rtl_hw_start_8102e_1(tp);
@@@ -5451,28 -5567,21 +5567,21 @@@
  	case RTL_GIGA_MAC_VER_39:
  		rtl_hw_start_8106(tp);
  		break;
+ 	case RTL_GIGA_MAC_VER_43:
+ 		rtl_hw_start_8168g_2(tp);
+ 		break;
  	}
  
  	RTL_W8(Cfg9346, Cfg9346_Lock);
  
- 	RTL_W8(MaxTxPacketSize, TxPacketMax);
- 
- 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- 
- 	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
- 	RTL_W16(CPlusCmd, tp->cp_cmd);
- 
  	RTL_W16(IntrMitigate, 0x0000);
  
- 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
- 
  	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
  
  	rtl_set_rx_mode(dev);
  
+ 	RTL_R8(IntrMask);
+ 
  	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
  }
  
@@@ -5787,14 -5896,6 +5896,14 @@@ static netdev_tx_t rtl8169_start_xmit(s
  		goto err_stop_0;
  	}
  
 +	/* 8168evl does not automatically pad to minimum length. */
 +	if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
 +		     skb->len < ETH_ZLEN)) {
 +		if (skb_padto(skb, ETH_ZLEN))
 +			goto err_update_stats;
 +		skb_put(skb, ETH_ZLEN - skb->len);
 +	}
 +
  	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
  		goto err_stop_0;
  
@@@ -5866,7 -5967,6 +5975,7 @@@ err_dma_1
  	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
  err_dma_0:
  	dev_kfree_skb(skb);
 +err_update_stats:
  	dev->stats.tx_dropped++;
  	return NETDEV_TX_OK;
  
@@@ -6753,6 -6853,8 +6862,8 @@@ static void rtl_hw_initialize(struct rt
  	switch (tp->mac_version) {
  	case RTL_GIGA_MAC_VER_40:
  	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_42:
+ 	case RTL_GIGA_MAC_VER_43:
  		rtl_hw_init_8168g(tp);
  		break;
  
@@@ -6935,16 -7037,17 +7046,17 @@@ rtl_init_one(struct pci_dev *pdev, cons
  	/* don't enable SG, IP_CSUM and TSO by default - it might not work
  	 * properly for all devices */
  	dev->features |= NETIF_F_RXCSUM |
- 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
  
  	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ 		NETIF_F_HW_VLAN_CTAG_RX;
  	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
  		NETIF_F_HIGHDMA;
  
  	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
  		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
- 		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+ 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
  
  	dev->hw_features |= NETIF_F_RXALL;
  	dev->hw_features |= NETIF_F_RXFCS;
diff --combined drivers/net/ethernet/sfc/falcon.c
index defed0e,4486102..71998e7
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@@ -1528,7 -1528,7 +1528,7 @@@ static int falcon_probe_nic(struct efx_
  	return 0;
  
   fail6:
 -	BUG_ON(i2c_del_adapter(&board->i2c_adap));
 +	i2c_del_adapter(&board->i2c_adap);
  	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
   fail5:
  	efx_nic_free_buffer(efx, &efx->irq_status);
@@@ -1546,10 -1546,6 +1546,6 @@@
  
  static void falcon_init_rx_cfg(struct efx_nic *efx)
  {
- 	/* Prior to Siena the RX DMA engine will split each frame at
- 	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
- 	 * be so large that that never happens. */
- 	const unsigned huge_buf_size = (3 * 4096) >> 5;
  	/* RX control FIFO thresholds (32 entries) */
  	const unsigned ctrl_xon_thr = 20;
  	const unsigned ctrl_xoff_thr = 25;
@@@ -1557,10 -1553,15 +1553,15 @@@
  
  	efx_reado(efx, &reg, FR_AZ_RX_CFG);
  	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
- 		/* Data FIFO size is 5.5K */
+ 		/* Data FIFO size is 5.5K.  The RX DMA engine only
+ 		 * supports scattering for user-mode queues, but will
+ 		 * split DMA writes at intervals of RX_USR_BUF_SIZE
+ 		 * (32-byte units) even for kernel-mode queues.  We
+ 		 * set it to be so large that that never happens.
+ 		 */
  		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
  		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
- 				    huge_buf_size);
+ 				    (3 * 4096) >> 5);
  		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
  		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
  		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@@ -1569,7 -1570,7 +1570,7 @@@
  		/* Data FIFO size is 80K; register fields moved */
  		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
  		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
- 				    huge_buf_size);
+ 				    EFX_RX_USR_BUF_SIZE >> 5);
  		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
  		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
  		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@@ -1665,11 -1666,13 +1666,11 @@@ static void falcon_remove_nic(struct ef
  {
  	struct falcon_nic_data *nic_data = efx->nic_data;
  	struct falcon_board *board = falcon_board(efx);
 -	int rc;
  
  	board->type->fini(efx);
  
  	/* Remove I2C adapter and clear it in preparation for a retry */
 -	rc = i2c_del_adapter(&board->i2c_adap);
 -	BUG_ON(rc);
 +	i2c_del_adapter(&board->i2c_adap);
  	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
  
  	efx_nic_free_buffer(efx, &efx->irq_status);
@@@ -1813,6 -1816,7 +1814,7 @@@ const struct efx_nic_type falcon_a1_nic
  	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
  	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
  	.rx_buffer_padding = 0x24,
+ 	.can_rx_scatter = false,
  	.max_interrupt_mode = EFX_INT_MODE_MSI,
  	.phys_addr_channels = 4,
  	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@@ -1863,6 -1867,7 +1865,7 @@@ const struct efx_nic_type falcon_b0_nic
  	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
  	.rx_buffer_hash_size = 0x10,
  	.rx_buffer_padding = 0,
+ 	.can_rx_scatter = true,
  	.max_interrupt_mode = EFX_INT_MODE_MSIX,
  	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
  				   * interrupt handler only supports 32
diff --combined drivers/net/ethernet/xircom/xirc2ps_cs.c
index 1025b4e,76210ab..bdd20b8
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@@ -1041,7 -1041,6 +1041,6 @@@ xirc2ps_interrupt(int irq, void *dev_id
  	    /* 1 extra so we can use insw */
  	    skb = netdev_alloc_skb(dev, pktlen + 3);
  	    if (!skb) {
- 		pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
  		dev->stats.rx_dropped++;
  	    } else { /* okay get the packet */
  		skb_reserve(skb, 2);
@@@ -1775,7 -1774,21 +1774,7 @@@ static struct pcmcia_driver xirc2ps_cs_
  	.suspend	= xirc2ps_suspend,
  	.resume		= xirc2ps_resume,
  };
 -
 -static int __init
 -init_xirc2ps_cs(void)
 -{
 -	return pcmcia_register_driver(&xirc2ps_cs_driver);
 -}
 -
 -static void __exit
 -exit_xirc2ps_cs(void)
 -{
 -	pcmcia_unregister_driver(&xirc2ps_cs_driver);
 -}
 -
 -module_init(init_xirc2ps_cs);
 -module_exit(exit_xirc2ps_cs);
 +module_pcmcia_driver(xirc2ps_cs_driver);
  
  #ifndef MODULE
  static int __init setup_xirc2ps_cs(char *str)
diff --combined drivers/net/ppp/ppp_synctty.c
index bdf3b13,090c834..925d3e2
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@@ -105,64 -105,15 +105,15 @@@ static const struct ppp_channel_ops syn
  };
  
  /*
-  * Utility procedures to print a buffer in hex/ascii
+  * Utility procedure to print a buffer in hex/ascii
   */
  static void
- ppp_print_hex (register __u8 * out, const __u8 * in, int count)
- {
- 	register __u8 next_ch;
- 	static const char hex[] = "0123456789ABCDEF";
- 
- 	while (count-- > 0) {
- 		next_ch = *in++;
- 		*out++ = hex[(next_ch >> 4) & 0x0F];
- 		*out++ = hex[next_ch & 0x0F];
- 		++out;
- 	}
- }
- 
- static void
- ppp_print_char (register __u8 * out, const __u8 * in, int count)
- {
- 	register __u8 next_ch;
- 
- 	while (count-- > 0) {
- 		next_ch = *in++;
- 
- 		if (next_ch < 0x20 || next_ch > 0x7e)
- 			*out++ = '.';
- 		else {
- 			*out++ = next_ch;
- 			if (next_ch == '%')   /* printk/syslogd has a bug !! */
- 				*out++ = '%';
- 		}
- 	}
- 	*out = '\0';
- }
- 
- static void
  ppp_print_buffer (const char *name, const __u8 *buf, int count)
  {
  	if (name != NULL)
  		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
  
- 	while (count > 8) {
- 		memset (line, 32, 44);
- 		ppp_print_hex (line, buf, 8);
- 		ppp_print_char (&line[8 * 3], buf, 8);
- 		printk(KERN_DEBUG "%s\n", line);
- 		count -= 8;
- 		buf += 8;
- 	}
- 
- 	if (count > 0) {
- 		memset (line, 32, 44);
- 		ppp_print_hex (line, buf, count);
- 		ppp_print_char (&line[8 * 3], buf, count);
- 		printk(KERN_DEBUG "%s\n", line);
- 	}
+ 	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
  }
  
  
@@@ -355,7 -306,7 +306,7 @@@ ppp_synctty_ioctl(struct tty_struct *tt
  		/* flush our buffers and the serial port's buffer */
  		if (arg == TCIOFLUSH || arg == TCOFLUSH)
  			ppp_sync_flush_output(ap);
 -		err = tty_perform_flush(tty, arg);
 +		err = n_tty_ioctl_helper(tty, file, cmd, arg);
  		break;
  
  	case FIONREAD:
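
[Aside, not from the patch: the ppp_synctty.c hunk above drops the hand-rolled hex/ascii formatter in favour of the kernel's print_hex_dump_bytes() helper. A minimal module sketch of the same call; the sample bytes are made up for illustration:]

#include <linux/module.h>
#include <linux/types.h>
#include <linux/printk.h>

static int __init hexdump_demo_init(void)
{
	static const u8 sample[] = { 0x7e, 0xff, 0x7d, 0x23, 0xc0, 0x21 };

	/* One call replaces the removed ppp_print_hex()/ppp_print_char() pair */
	print_hex_dump_bytes("ppp demo: ", DUMP_PREFIX_OFFSET,
			     sample, sizeof(sample));
	return 0;
}

static void __exit hexdump_demo_exit(void)
{
}

module_init(hexdump_demo_init);
module_exit(hexdump_demo_exit);
MODULE_LICENSE("GPL");
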
diff --combined drivers/net/tun.c
index dcd0c19,66109a2..f042b03
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -409,14 -409,12 +409,12 @@@ static void __tun_detach(struct tun_fil
  {
  	struct tun_file *ntfile;
  	struct tun_struct *tun;
- 	struct net_device *dev;
  
  	tun = rtnl_dereference(tfile->tun);
  
  	if (tun && !tfile->detached) {
  		u16 index = tfile->queue_index;
  		BUG_ON(index >= tun->numqueues);
- 		dev = tun->dev;
  
  		rcu_assign_pointer(tun->tfiles[index],
  				   tun->tfiles[tun->numqueues - 1]);
@@@ -1205,6 -1203,8 +1203,8 @@@ static ssize_t tun_get_user(struct tun_
  	}
  
  	skb_reset_network_header(skb);
+ 	skb_probe_transport_header(skb, 0);
+ 
  	rxhash = skb_get_rxhash(skb);
  	netif_rx_ni(skb);
  
@@@ -1471,17 -1471,14 +1471,17 @@@ static int tun_recvmsg(struct kiocb *io
  	if (!tun)
  		return -EBADFD;
  
 -	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 -		return -EINVAL;
 +	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
 +		ret = -EINVAL;
 +		goto out;
 +	}
  	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
  			  flags & MSG_DONTWAIT);
  	if (ret > total_len) {
  		m->msg_flags |= MSG_TRUNC;
  		ret = flags & MSG_TRUNC ? ret : total_len;
  	}
 +out:
  	tun_put(tun);
  	return ret;
  }
@@@ -1596,12 -1593,8 +1596,12 @@@ static int tun_set_iff(struct net *net
  			return err;
  
  		if (tun->flags & TUN_TAP_MQ &&
 -		    (tun->numqueues + tun->numdisabled > 1))
 -			return -EBUSY;
 +		    (tun->numqueues + tun->numdisabled > 1)) {
 +			/* One or more queue has already been attached, no need
 +			 * to initialize the device again.
 +			 */
 +			return 0;
 +		}
  	}
  	else {
  		char *name;
@@@ -1663,6 -1656,7 +1663,7 @@@
  		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
  			TUN_USER_FEATURES;
  		dev->features = dev->hw_features;
+ 		dev->vlan_features = dev->features;
  
  		INIT_LIST_HEAD(&tun->disabled);
  		err = tun_attach(tun, file);
diff --combined drivers/net/usb/cdc_mbim.c
index 32a7605,c964544..8728198
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@@ -101,7 -101,7 +101,7 @@@ static int cdc_mbim_bind(struct usbnet 
  	dev->net->flags |= IFF_NOARP;
  
  	/* no need to put the VLAN tci in the packet headers */
- 	dev->net->features |= NETIF_F_HW_VLAN_TX;
+ 	dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
  err:
  	return ret;
  }
@@@ -221,7 -221,7 +221,7 @@@ static struct sk_buff *cdc_mbim_process
  
  	/* map MBIM session to VLAN */
  	if (tci)
- 		vlan_put_tag(skb, tci);
+ 		vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
  err:
  	return skb;
  }
@@@ -323,11 -323,6 +323,11 @@@ static int cdc_mbim_suspend(struct usb_
  		goto error;
  	}
  
 +	/*
 +	 * Both usbnet_suspend() and subdriver->suspend() MUST return 0
 +	 * in system sleep context, otherwise, the resume callback has
 +	 * to recover device from previous suspend failure.
 +	 */
  	ret = usbnet_suspend(intf, message);
  	if (ret < 0)
  		goto error;
diff --combined drivers/net/wireless/ath/wil6210/debugfs.c
index 76c7694,4be07f5..727b1f5
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@@ -216,7 -216,7 +216,7 @@@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32
  			wil_debugfs_iomem_x32_set, "0x%08llx\n");
  
  static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
 -						   mode_t mode,
 +						   umode_t mode,
  						   struct dentry *parent,
  						   void __iomem *value)
  {
@@@ -312,14 -312,6 +312,6 @@@ static const struct file_operations fop
  	.llseek		= seq_lseek,
  };
  
- static int wil_default_open(struct inode *inode, struct file *file)
- {
- 	if (inode->i_private)
- 		file->private_data = inode->i_private;
- 
- 	return 0;
- }
- 
  static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
  				size_t count, loff_t *ppos)
  {
@@@ -361,13 -353,13 +353,13 @@@
  
  static const struct file_operations fops_ioblob = {
  	.read =		wil_read_file_ioblob,
- 	.open =		wil_default_open,
+ 	.open =		simple_open,
  	.llseek =	default_llseek,
  };
  
  static
  struct dentry *wil_debugfs_create_ioblob(const char *name,
 -					 mode_t mode,
 +					 umode_t mode,
  					 struct dentry *parent,
  					 struct debugfs_blob_wrapper *blob)
  {
@@@ -396,7 -388,7 +388,7 @@@ static ssize_t wil_write_file_reset(str
  
  static const struct file_operations fops_reset = {
  	.write = wil_write_file_reset,
- 	.open  = wil_default_open,
+ 	.open  = simple_open,
  };
  /*---------Tx descriptor------------*/
  
@@@ -526,7 -518,50 +518,50 @@@ static ssize_t wil_write_file_ssid(stru
  static const struct file_operations fops_ssid = {
  	.read = wil_read_file_ssid,
  	.write = wil_write_file_ssid,
- 	.open  = wil_default_open,
+ 	.open  = simple_open,
+ };
+ 
+ /*---------temp------------*/
+ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+ {
+ 	switch (t) {
+ 	case 0:
+ 	case ~(u32)0:
+ 		seq_printf(s, "%s N/A\n", prefix);
+ 	break;
+ 	default:
+ 		seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ 		break;
+ 	}
+ }
+ 
+ static int wil_temp_debugfs_show(struct seq_file *s, void *data)
+ {
+ 	struct wil6210_priv *wil = s->private;
+ 	u32 t_m, t_r;
+ 
+ 	int rc = wmi_get_temperature(wil, &t_m, &t_r);
+ 	if (rc) {
+ 		seq_printf(s, "Failed\n");
+ 		return 0;
+ 	}
+ 
+ 	print_temp(s, "MAC temperature   :", t_m);
+ 	print_temp(s, "Radio temperature :", t_r);
+ 
+ 	return 0;
+ }
+ 
+ static int wil_temp_seq_open(struct inode *inode, struct file *file)
+ {
+ 	return single_open(file, wil_temp_debugfs_show, inode->i_private);
+ }
+ 
+ static const struct file_operations fops_temp = {
+ 	.open		= wil_temp_seq_open,
+ 	.release	= single_release,
+ 	.read		= seq_read,
+ 	.llseek		= seq_lseek,
  };
  
  /*----------------*/
@@@ -563,6 -598,7 +598,7 @@@ int wil6210_debugfs_init(struct wil6210
  	debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
  
  	debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+ 	debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
  
  	wil->rgf_blob.data = (void * __force)wil->csr + 0;
  	wil->rgf_blob.size = 0xa000;
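
[Aside, not from the patch: the wil6210 debugfs hunks above replace the driver's private open helper with simple_open and add a seq_file-backed "temp" attribute via single_open(). A minimal, self-contained module sketch of that pattern; the directory name and sample value are illustrative only:]

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *dbg_dir;
static u32 fake_temp = 41250;	/* illustrative millidegree reading */

static int temp_show(struct seq_file *s, void *data)
{
	u32 t = *(u32 *)s->private;	/* data pointer passed at create time */

	seq_printf(s, "MAC temperature : %u.%03u\n", t / 1000, t % 1000);
	return 0;
}

static int temp_open(struct inode *inode, struct file *file)
{
	return single_open(file, temp_show, inode->i_private);
}

static const struct file_operations temp_fops = {
	.open		= temp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init dbg_demo_init(void)
{
	dbg_dir = debugfs_create_dir("dbg_demo", NULL);
	debugfs_create_file("temp", 0444, dbg_dir, &fake_temp, &temp_fops);
	return 0;
}

static void __exit dbg_demo_exit(void)
{
	debugfs_remove_recursive(dbg_dir);
}

module_init(dbg_demo_init);
module_exit(dbg_demo_exit);
MODULE_LICENSE("GPL");
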
diff --combined drivers/net/wireless/ray_cs.c
index a6f660c,ebada81..9b557a1
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@@ -144,7 -144,7 +144,7 @@@ static int psm
  static char *essid;
  
  /* Default to encapsulation unless translation requested */
- static int translate = 1;
+ static bool translate = 1;
  
  static int country = USA;
  
@@@ -178,7 -178,7 +178,7 @@@ module_param(hop_dwell, int, 0)
  module_param(beacon_period, int, 0);
  module_param(psm, int, 0);
  module_param(essid, charp, 0);
- module_param(translate, int, 0);
+ module_param(translate, bool, 0);
  module_param(country, int, 0);
  module_param(sniffer, int, 0);
  module_param(bc, int, 0);
@@@ -953,7 -953,7 +953,7 @@@ static int translate_frame(ray_dev_t *l
  			   unsigned char *data, int len)
  {
  	__be16 proto = ((struct ethhdr *)data)->h_proto;
- 	if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
+ 	if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */
  		pr_debug("ray_cs translate_frame DIX II\n");
  		/* Copy LLC header to card buffer */
  		memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
@@@ -1353,7 -1353,7 +1353,7 @@@ static int ray_get_range(struct net_dev
  static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
  			   union iwreq_data *wrqu, char *extra)
  {
- 	translate = *(extra);	/* Set framing mode */
+ 	translate = !!*(extra);	/* Set framing mode */
  
  	return 0;
  }
@@@ -2778,7 -2778,7 +2778,7 @@@ static ssize_t int_proc_write(struct fi
  		nr = nr * 10 + c;
  		p++;
  	} while (--len);
 -	*(int *)PDE(file_inode(file))->data = nr;
 +	*(int *)PDE_DATA(file_inode(file)) = nr;
  	return count;
  }
  
diff --combined drivers/net/wireless/rt2x00/rt2x00mmio.c
index d84a680,64b06c6..9acc388
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@@ -34,10 -34,10 +34,10 @@@
  /*
   * Register access.
   */
- int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- 			   const unsigned int offset,
- 			   const struct rt2x00_field32 field,
- 			   u32 *reg)
+ int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ 			    const unsigned int offset,
+ 			    const struct rt2x00_field32 field,
+ 			    u32 *reg)
  {
  	unsigned int i;
  
@@@ -45,7 -45,7 +45,7 @@@
  		return 0;
  
  	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- 		rt2x00pci_register_read(rt2x00dev, offset, reg);
+ 		rt2x00mmio_register_read(rt2x00dev, offset, reg);
  		if (!rt2x00_get_field32(*reg, field))
  			return 1;
  		udelay(REGISTER_BUSY_DELAY);
@@@ -57,13 -57,13 +57,13 @@@
  
  	return 0;
  }
- EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
+ EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);
  
- bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
+ bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
  {
  	struct data_queue *queue = rt2x00dev->rx;
  	struct queue_entry *entry;
- 	struct queue_entry_priv_pci *entry_priv;
+ 	struct queue_entry_priv_mmio *entry_priv;
  	struct skb_frame_desc *skbdesc;
  	int max_rx = 16;
  
@@@ -96,24 -96,24 +96,24 @@@
  
  	return !max_rx;
  }
- EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
+ EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);
  
- void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
+ void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
  {
  	unsigned int i;
  
  	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
  		msleep(10);
  }
- EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
+ EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);
  
  /*
   * Device initialization handlers.
   */
- static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
- 				     struct data_queue *queue)
+ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+ 				      struct data_queue *queue)
  {
- 	struct queue_entry_priv_pci *entry_priv;
+ 	struct queue_entry_priv_mmio *entry_priv;
  	void *addr;
  	dma_addr_t dma;
  	unsigned int i;
@@@ -123,12 -123,12 +123,10 @@@
  	 */
  	addr = dma_alloc_coherent(rt2x00dev->dev,
  				  queue->limit * queue->desc_size,
--				  &dma, GFP_KERNEL);
++				  &dma, GFP_KERNEL | __GFP_ZERO);
  	if (!addr)
  		return -ENOMEM;
  
--	memset(addr, 0, queue->limit * queue->desc_size);
--
  	/*
  	 * Initialize all queue entries to contain valid addresses.
  	 */
@@@ -141,10 -141,10 +139,10 @@@
  	return 0;
  }
  
- static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
- 				     struct data_queue *queue)
+ static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+ 				      struct data_queue *queue)
  {
- 	struct queue_entry_priv_pci *entry_priv =
+ 	struct queue_entry_priv_mmio *entry_priv =
  	    queue->entries[0].priv_data;
  
  	if (entry_priv->desc)
@@@ -154,7 -154,7 +152,7 @@@
  	entry_priv->desc = NULL;
  }
  
- int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
+ int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
  {
  	struct data_queue *queue;
  	int status;
@@@ -163,7 -163,7 +161,7 @@@
  	 * Allocate DMA
  	 */
  	queue_for_each(rt2x00dev, queue) {
- 		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
+ 		status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
  		if (status)
  			goto exit;
  	}
@@@ -175,8 -175,8 +173,8 @@@
  			     rt2x00dev->ops->lib->irq_handler,
  			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
  	if (status) {
- 		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
- 		      rt2x00dev->irq, status);
+ 		rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
+ 			   rt2x00dev->irq, status);
  		goto exit;
  	}
  
@@@ -184,13 -184,13 +182,13 @@@
  
  exit:
  	queue_for_each(rt2x00dev, queue)
- 		rt2x00pci_free_queue_dma(rt2x00dev, queue);
+ 		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
  
  	return status;
  }
- EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
+ EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);
  
- void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
+ void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
  {
  	struct data_queue *queue;
  
@@@ -203,9 -203,9 +201,9 @@@
  	 * Free DMA
  	 */
  	queue_for_each(rt2x00dev, queue)
- 		rt2x00pci_free_queue_dma(rt2x00dev, queue);
+ 		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
  }
- EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
+ EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);
  
  /*
   * rt2x00mmio module information.
diff --combined drivers/scsi/csiostor/csio_hw.c
index 7dbaf58,a0b4c89..1936055
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@@ -61,7 -61,7 +61,7 @@@ int csio_msi = 2
  static int dev_num;
  
  /* FCoE Adapter types & its description */
- static const struct csio_adap_desc csio_fcoe_adapters[] = {
+ static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
  	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
  	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
  	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
@@@ -77,7 -77,38 +77,38 @@@
  	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
  	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
  	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
- 	{"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+ 	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ 	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
+ 	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
+ 	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
+ 	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
+ 	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
+ 	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
+ 	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
+ 	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
+ };
+ 
+ static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
+ 	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
+ 	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
+ 	{"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
+ 	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
+ 	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
+ 	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
+ 	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
+ 	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
+ 	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
+ 	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
+ 	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
+ 	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
+ 	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
+ 	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
+ 	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
+ 	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ 	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
+ 	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
+ 	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
+ 	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
  };
  
  static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@@ -124,7 -155,7 +155,7 @@@ int csio_is_hw_removing(struct csio_hw 
   *	at the time it indicated completion is stored there.  Returns 0 if the
   *	operation completes and	-EAGAIN	otherwise.
   */
- static int
+ int
  csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
  			 int polarity, int attempts, int delay, uint32_t *valp)
  {
@@@ -145,6 -176,24 +176,24 @@@
  	}
  }
  
+ /*
+  *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+  *	@hw: the adapter
+  *	@addr: the indirect TP register address
+  *	@mask: specifies the field within the register to modify
+  *	@val: new value for the field
+  *
+  *	Sets a field of an indirect TP register to the given value.
+  */
+ void
+ csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
+ 			unsigned int mask, unsigned int val)
+ {
+ 	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
+ 	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
+ 	csio_wr_reg32(hw, val, TP_PIO_DATA);
+ }
+ 
  void
  csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
  		   uint32_t value)
@@@ -157,242 -206,22 +206,22 @@@
  
  }
  
  static int
  csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
  {
- 	return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+ 	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
+ 					    addr, len, buf, 0);
  }
  
  /*
   * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
   */
- #define EEPROM_MAX_RD_POLL 40
- #define EEPROM_MAX_WR_POLL 6
- #define EEPROM_STAT_ADDR   0x7bfc
- #define VPD_BASE           0x400
- #define VPD_BASE_OLD	   0
- #define VPD_LEN            512
+ #define EEPROM_MAX_RD_POLL	40
+ #define EEPROM_MAX_WR_POLL	6
+ #define EEPROM_STAT_ADDR	0x7bfc
+ #define VPD_BASE		0x400
+ #define VPD_BASE_OLD		0
+ #define VPD_LEN			1024
  #define VPD_INFO_FLD_HDR_SIZE	3
  
  /*
@@@ -817,23 -646,6 +646,6 @@@ out
  	return 0;
  }
  
- /*
-  *	csio_hw_flash_cfg_addr - return the address of the flash
-  *				configuration file
-  *	@hw: the HW module
-  *
-  *	Return the address within the flash where the Firmware Configuration
-  *	File is stored.
-  */
- static unsigned int
- csio_hw_flash_cfg_addr(struct csio_hw *hw)
- {
- 	if (hw->params.sf_size == 0x100000)
- 		return FPGA_FLASH_CFG_OFFSET;
- 	else
- 		return FLASH_CFG_OFFSET;
- }
- 
  static void
  csio_hw_print_fw_version(struct csio_hw *hw, char *str)
  {
@@@ -898,13 -710,13 +710,13 @@@ csio_hw_check_fw_version(struct csio_h
  	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
  	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
  
- 	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
+ 	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
  		csio_err(hw, "card FW has major version %u, driver wants %u\n",
- 			 major, FW_VERSION_MAJOR);
+ 			 major, FW_VERSION_MAJOR(hw));
  		return -EINVAL;
  	}
  
- 	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ 	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
  		return 0;        /* perfect match */
  
  	/* Minor/micro version mismatch */
@@@ -1044,7 -856,7 +856,7 @@@ static voi
  csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
  {
  	uint16_t val;
- 	uint32_t pcie_cap;
+ 	int pcie_cap;
  
  	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
  		pci_read_config_word(hw->pdev,
@@@ -1056,84 -868,6 +868,6 @@@
  	}
  }
  
- 
- /*
-  * Return the specified PCI-E Configuration Space register from our Physical
-  * Function.  We try first via a Firmware LDST Command since we prefer to let
-  * the firmware own all of these registers, but if that fails we go for it
-  * directly ourselves.
-  */
- static uint32_t
- csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
- {
- 	u32 val = 0;
- 	struct csio_mb *mbp;
- 	int rv;
- 	struct fw_ldst_cmd *ldst_cmd;
- 
- 	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
- 	if (!mbp) {
- 		CSIO_INC_STATS(hw, n_err_nomem);
- 		pci_read_config_dword(hw->pdev, reg, &val);
- 		return val;
- 	}
- 
- 	csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
- 
- 	rv = csio_mb_issue(hw, mbp);
- 
- 	/*
- 	 * If the LDST Command suucceeded, exctract the returned register
- 	 * value.  Otherwise read it directly ourself.
- 	 */
- 	if (rv == 0) {
- 		ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
- 		val = ntohl(ldst_cmd->u.pcie.data[0]);
- 	} else
- 		pci_read_config_dword(hw->pdev, reg, &val);
- 
- 	mempool_free(mbp, hw->mb_mempool);
- 
- 	return val;
- } /* csio_read_pcie_cfg4 */
- 
- static int
- csio_hw_set_mem_win(struct csio_hw *hw)
- {
- 	u32 bar0;
- 
- 	/*
- 	 * Truncation intentional: we only read the bottom 32-bits of the
- 	 * 64-bit BAR0/BAR1 ...  We use the hardware backdoor mechanism to
- 	 * read BAR0 instead of using pci_resource_start() because we could be
- 	 * operating from within a Virtual Machine which is trapping our
- 	 * accesses to our Configuration Space and we need to set up the PCI-E
- 	 * Memory Window decoders with the actual addresses which will be
- 	 * coming across the PCI-E link.
- 	 */
- 	bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
- 	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- 
- 	/*
- 	 * Set up memory window for accessing adapter memory ranges.  (Read
- 	 * back MA register to ensure that changes propagate before we attempt
- 	 * to use the new values.)
- 	 */
- 	csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
- 		WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
- 		PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
- 	csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
- 		WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
- 		PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
- 	csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
- 		WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
- 		PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- 	csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- 	return 0;
- } /* csio_hw_set_mem_win */
- 
- 
- 
  /*****************************************************************************/
  /* HW State machine assists                                                  */
  /*****************************************************************************/
@@@ -1234,7 -968,9 +968,9 @@@ retry
  		for (;;) {
  			uint32_t pcie_fw;
  
+ 			spin_unlock_irq(&hw->lock);
  			msleep(50);
+ 			spin_lock_irq(&hw->lock);
  			waiting -= 50;
  
  			/*
@@@ -2121,9 -1857,9 +1857,9 @@@ csio_hw_flash_config(struct csio_hw *hw
  	uint32_t *cfg_data;
  	int value_to_add = 0;
  
- 	if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) {
- 		csio_err(hw, "could not find config file " CSIO_CF_FNAME
- 			 ",err: %d\n", ret);
+ 	if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
+ 		csio_err(hw, "could not find config file %s, err: %d\n",
+ 			 CSIO_CF_FNAME(hw), ret);
  		return -ENOENT;
  	}
  
@@@ -2147,9 -1883,24 +1883,24 @@@
  
  	ret = csio_memory_write(hw, mtype, maddr,
  				cf->size + value_to_add, cfg_data);
+ 
+ 	if ((ret == 0) && (value_to_add != 0)) {
+ 		union {
+ 			u32 word;
+ 			char buf[4];
+ 		} last;
+ 		size_t size = cf->size & ~0x3;
+ 		int i;
+ 
+ 		last.word = cfg_data[size >> 2];
+ 		for (i = value_to_add; i < 4; i++)
+ 			last.buf[i] = 0;
+ 		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
+ 	}
  	if (ret == 0) {
- 		csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
- 		strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ 		csio_info(hw, "config file upgraded to %s\n",
+ 			  CSIO_CF_FNAME(hw));
+ 		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
  	}
  
  leave:
@@@ -2179,7 -1930,7 +1930,7 @@@ csio_hw_use_fwconfig(struct csio_hw *hw
  {
  	unsigned int mtype, maddr;
  	int rv;
- 	uint32_t finiver, finicsum, cfcsum;
+ 	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
  	int using_flash;
  	char path[64];
  
@@@ -2207,7 -1958,7 +1958,7 @@@
  			 * config file from flash.
  			 */
  			mtype = FW_MEMTYPE_CF_FLASH;
- 			maddr = csio_hw_flash_cfg_addr(hw);
+ 			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
  			using_flash = 1;
  		} else {
  			/*
@@@ -2346,30 -2097,32 +2097,32 @@@ csio_hw_flash_fw(struct csio_hw *hw
  	struct pci_dev *pci_dev = hw->pdev;
  	struct device *dev = &pci_dev->dev ;
  
- 	if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
- 		csio_err(hw, "could not find firmware image " CSIO_FW_FNAME
- 		",err: %d\n", ret);
+ 	if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
+ 		csio_err(hw, "could not find firmware image %s, err: %d\n",
+ 			 CSIO_FW_FNAME(hw), ret);
  		return -EINVAL;
  	}
  
  	hdr = (const struct fw_hdr *)fw->data;
  	fw_ver = ntohl(hdr->fw_ver);
- 	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR)
+ 	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
  		return -EINVAL;      /* wrong major version, won't do */
  
  	/*
  	 * If the flash FW is unusable or we found something newer, load it.
  	 */
- 	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ 	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
  	    fw_ver > hw->fwrev) {
  		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
  				    /*force=*/false);
  		if (!ret)
- 			csio_info(hw, "firmware upgraded to version %pI4 from "
- 				  CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ 			csio_info(hw,
+ 				  "firmware upgraded to version %pI4 from %s\n",
+ 				  &hdr->fw_ver, CSIO_FW_FNAME(hw));
  		else
  			csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
- 	}
+ 	} else
+ 		ret = -EINVAL;
  
  	release_firmware(fw);
  
@@@ -2410,7 -2163,7 +2163,7 @@@ csio_hw_configure(struct csio_hw *hw
  	/* Set pci completion timeout value to 4 seconds. */
  	csio_set_pcie_completion_timeout(hw, 0xd);
  
- 	csio_hw_set_mem_win(hw);
+ 	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
  
  	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
  	if (rv != 0)
@@@ -2478,6 -2231,8 +2231,8 @@@
  	} else {
  		if (hw->fw_state == CSIO_DEV_STATE_INIT) {
  
+ 			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+ 
  			/* device parameters */
  			rv = csio_get_device_params(hw);
  			if (rv != 0)
@@@ -2651,7 -2406,7 +2406,7 @@@ csio_hw_intr_disable(struct csio_hw *hw
  
  }
  
- static void
+ void
  csio_hw_fatal_err(struct csio_hw *hw)
  {
  	csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
@@@ -2990,14 -2745,6 +2745,6 @@@ csio_hws_pcierr(struct csio_hw *hw, enu
  /* END: HW SM                                                                */
  /*****************************************************************************/
  
- /* Slow path handlers */
- struct intr_info {
- 	unsigned int mask;       /* bits to check in interrupt status */
- 	const char *msg;         /* message to print or NULL */
- 	short stat_idx;          /* stat counter to increment or -1 */
- 	unsigned short fatal;    /* whether the condition reported is fatal */
- };
- 
  /*
   *	csio_handle_intr_status - table driven interrupt handler
   *	@hw: HW instance
@@@ -3011,7 -2758,7 +2758,7 @@@
   *	by an entry specifying mask 0.  Returns the number of fatal interrupt
   *	conditions.
   */
- static int
+ int
  csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
  				 const struct intr_info *acts)
  {
@@@ -3038,80 -2785,6 +2785,6 @@@
  }
  
  /*
-  * Interrupt handler for the PCIE module.
-  */
- static void
- csio_pcie_intr_handler(struct csio_hw *hw)
- {
- 	static struct intr_info sysbus_intr_info[] = {
- 		{ RNPP, "RXNP array parity error", -1, 1 },
- 		{ RPCP, "RXPC array parity error", -1, 1 },
- 		{ RCIP, "RXCIF array parity error", -1, 1 },
- 		{ RCCP, "Rx completions control array parity error", -1, 1 },
- 		{ RFTP, "RXFT array parity error", -1, 1 },
- 		{ 0, NULL, 0, 0 }
- 	};
- 	static struct intr_info pcie_port_intr_info[] = {
- 		{ TPCP, "TXPC array parity error", -1, 1 },
- 		{ TNPP, "TXNP array parity error", -1, 1 },
- 		{ TFTP, "TXFT array parity error", -1, 1 },
- 		{ TCAP, "TXCA array parity error", -1, 1 },
- 		{ TCIP, "TXCIF array parity error", -1, 1 },
- 		{ RCAP, "RXCA array parity error", -1, 1 },
- 		{ OTDD, "outbound request TLP discarded", -1, 1 },
- 		{ RDPE, "Rx data parity error", -1, 1 },
- 		{ TDUE, "Tx uncorrectable data error", -1, 1 },
- 		{ 0, NULL, 0, 0 }
- 	};
- 	static struct intr_info pcie_intr_info[] = {
- 		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- 		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- 		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
- 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- 		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- 		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- 		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- 		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- 		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- 		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- 		{ FIDPERR, "PCI FID parity error", -1, 1 },
- 		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- 		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
- 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- 		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- 		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
- 		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
- 		{ PCIESINT, "PCI core secondary fault", -1, 1 },
- 		{ PCIEPINT, "PCI core primary fault", -1, 1 },
- 		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1,
- 		  0 },
- 		{ 0, NULL, 0, 0 }
- 	};
- 
- 	int fat;
- 
- 	fat = csio_handle_intr_status(hw,
- 				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- 				    sysbus_intr_info) +
- 	      csio_handle_intr_status(hw,
- 				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- 				    pcie_port_intr_info) +
- 	      csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
- 	if (fat)
- 		csio_hw_fatal_err(hw);
- }
- 
- /*
   * TP interrupt handler.
   */
  static void csio_tp_intr_handler(struct csio_hw *hw)
@@@ -3517,7 -3190,7 +3190,7 @@@ static void csio_ncsi_intr_handler(stru
   */
  static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
  {
- 	uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ 	uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
  
  	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
  	if (!v)
@@@ -3527,7 -3200,7 +3200,7 @@@
  		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
  	if (v & RXFIFO_PRTY_ERR)
  		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
- 	csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ 	csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
  	csio_hw_fatal_err(hw);
  }
  
@@@ -3596,7 -3269,7 +3269,7 @@@ csio_hw_slow_intr_handler(struct csio_h
  		csio_xgmac_intr_handler(hw, 3);
  
  	if (cause & PCIE)
- 		csio_pcie_intr_handler(hw);
+ 		hw->chip_ops->chip_pcie_intr_handler(hw);
  
  	if (cause & MC)
  		csio_mem_intr_handler(hw, MEM_MC);
@@@ -3892,6 -3565,7 +3565,6 @@@ csio_process_fwevtq_entry(struct csio_h
  			  struct csio_fl_dma_buf *flb, void *priv)
  {
  	__u8 op;
 -	__be64 *data;
  	void *msg = NULL;
  	uint32_t msg_len = 0;
  	bool msg_sg = 0;
@@@ -3907,6 -3581,8 +3580,6 @@@
  		msg = (void *) flb;
  		msg_len = flb->totlen;
  		msg_sg = 1;
 -
 -		data = (__be64 *) msg;
  	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
  
  		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
@@@ -3914,6 -3590,8 +3587,6 @@@
  		msg = (void *)((uintptr_t)wr + sizeof(__be64));
  		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
  			   sizeof(struct cpl_fw4_msg);
 -
 -		data = (__be64 *) msg;
  	} else {
  		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
  		CSIO_INC_STATS(hw, n_cpl_unexp);
@@@ -4257,6 -3935,7 +3930,7 @@@ csio_hw_get_device_id(struct csio_hw *h
  			     &hw->params.pci.device_id);
  
  	csio_dev_id_cached(hw);
+ 	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
  
  } /* csio_hw_get_device_id */
  
@@@ -4275,19 -3954,21 +3949,21 @@@ csio_hw_set_description(struct csio_hw 
  		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
  		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
  
- 		if (prot_type == CSIO_FPGA) {
+ 		if (prot_type == CSIO_T4_FCOE_ASIC) {
+ 			memcpy(hw->hw_ver,
+ 			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
  			memcpy(hw->model_desc,
- 				csio_fcoe_adapters[13].description, 32);
- 		} else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ 			       csio_t4_fcoe_adapters[adap_type].description,
+ 			       32);
+ 		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
  			memcpy(hw->hw_ver,
- 			       csio_fcoe_adapters[adap_type].model_no, 16);
+ 			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
  			memcpy(hw->model_desc,
- 				csio_fcoe_adapters[adap_type].description, 32);
+ 			       csio_t5_fcoe_adapters[adap_type].description,
+ 			       32);
  		} else {
  			char tempName[32] = "Chelsio FCoE Controller";
  			memcpy(hw->model_desc, tempName, 32);
- 
- 			CSIO_DB_ASSERT(0);
  		}
  	}
  } /* csio_hw_set_description */
@@@ -4316,6 -3997,9 +3992,9 @@@ csio_hw_init(struct csio_hw *hw
  
  	strcpy(hw->name, CSIO_HW_NAME);
  
+ 	/* Initialize the HW chip ops with T4/T5 specific ops */
+ 	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
+ 
  	/* Set the model & its description */
  
  	ven_id = hw->params.pci.vendor_id;
diff --combined drivers/scsi/scsi_transport_iscsi.c
index ce06e87,2e38165..47799a3
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@@ -25,7 -25,6 +25,7 @@@
  #include <linux/slab.h>
  #include <linux/bsg-lib.h>
  #include <linux/idr.h>
 +#include <linux/list.h>
  #include <net/tcp.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_host.h>
@@@ -461,689 -460,6 +461,689 @@@ void iscsi_destroy_iface(struct iscsi_i
  EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
  
  /*
 + * Interface to display flash node params to sysfs
 + */
 +
 +#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store)	\
 +struct device_attribute dev_attr_##_prefix##_##_name =			\
 +	__ATTR(_name, _mode, _show, _store)
 +
 +/* flash node session attrs show */
 +#define iscsi_flashnode_sess_attr_show(type, name, param)		\
 +static ssize_t								\
 +show_##type##_##name(struct device *dev, struct device_attribute *attr,	\
 +		     char *buf)						\
 +{									\
 +	struct iscsi_bus_flash_session *fnode_sess =			\
 +					iscsi_dev_to_flash_session(dev);\
 +	struct iscsi_transport *t = fnode_sess->transport;		\
 +	return t->get_flashnode_param(fnode_sess, param, buf);		\
 +}									\
 +
 +
 +#define iscsi_flashnode_sess_attr(type, name, param)			\
 +	iscsi_flashnode_sess_attr_show(type, name, param)		\
 +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO,			\
 +			    show_##type##_##name, NULL);
 +
 +/* Flash node session attributes */
 +
 +iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
 +			  ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
 +iscsi_flashnode_sess_attr(fnode, discovery_session,
 +			  ISCSI_FLASHNODE_DISCOVERY_SESS);
 +iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
 +iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
 +iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
 +iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
 +iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
 +			  ISCSI_FLASHNODE_DATASEQ_INORDER);
 +iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
 +			  ISCSI_FLASHNODE_PDU_INORDER);
 +iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
 +iscsi_flashnode_sess_attr(fnode, discovery_logout,
 +			  ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
 +iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
 +iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
 +			  ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
 +iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
 +iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
 +iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
 +iscsi_flashnode_sess_attr(fnode, def_time2retain,
 +			  ISCSI_FLASHNODE_DEF_TIME2RETAIN);
 +iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
 +iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
 +iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
 +iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
 +iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
 +			  ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
 +iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
 +iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
 +iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
 +iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
 +			  ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
 +iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
 +			  ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
 +iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
 +iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
 +iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
 +iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
 +iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
 +iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
 +iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
 +
 +static struct attribute *iscsi_flashnode_sess_attrs[] = {
 +	&dev_attr_fnode_auto_snd_tgt_disable.attr,
 +	&dev_attr_fnode_discovery_session.attr,
 +	&dev_attr_fnode_portal_type.attr,
 +	&dev_attr_fnode_entry_enable.attr,
 +	&dev_attr_fnode_immediate_data.attr,
 +	&dev_attr_fnode_initial_r2t.attr,
 +	&dev_attr_fnode_data_seq_in_order.attr,
 +	&dev_attr_fnode_data_pdu_in_order.attr,
 +	&dev_attr_fnode_chap_auth.attr,
 +	&dev_attr_fnode_discovery_logout.attr,
 +	&dev_attr_fnode_bidi_chap.attr,
 +	&dev_attr_fnode_discovery_auth_optional.attr,
 +	&dev_attr_fnode_erl.attr,
 +	&dev_attr_fnode_first_burst_len.attr,
 +	&dev_attr_fnode_def_time2wait.attr,
 +	&dev_attr_fnode_def_time2retain.attr,
 +	&dev_attr_fnode_max_outstanding_r2t.attr,
 +	&dev_attr_fnode_isid.attr,
 +	&dev_attr_fnode_tsid.attr,
 +	&dev_attr_fnode_max_burst_len.attr,
 +	&dev_attr_fnode_def_taskmgmt_tmo.attr,
 +	&dev_attr_fnode_targetalias.attr,
 +	&dev_attr_fnode_targetname.attr,
 +	&dev_attr_fnode_tpgt.attr,
 +	&dev_attr_fnode_discovery_parent_idx.attr,
 +	&dev_attr_fnode_discovery_parent_type.attr,
 +	&dev_attr_fnode_chap_in_idx.attr,
 +	&dev_attr_fnode_chap_out_idx.attr,
 +	&dev_attr_fnode_username.attr,
 +	&dev_attr_fnode_username_in.attr,
 +	&dev_attr_fnode_password.attr,
 +	&dev_attr_fnode_password_in.attr,
 +	&dev_attr_fnode_is_boot_target.attr,
 +	NULL,
 +};
 +
 +static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
 +						    struct attribute *attr,
 +						    int i)
 +{
 +	struct device *dev = container_of(kobj, struct device, kobj);
 +	struct iscsi_bus_flash_session *fnode_sess =
 +						iscsi_dev_to_flash_session(dev);
 +	struct iscsi_transport *t = fnode_sess->transport;
 +	int param;
 +
 +	if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
 +		param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
 +	} else if (attr == &dev_attr_fnode_discovery_session.attr) {
 +		param = ISCSI_FLASHNODE_DISCOVERY_SESS;
 +	} else if (attr == &dev_attr_fnode_portal_type.attr) {
 +		param = ISCSI_FLASHNODE_PORTAL_TYPE;
 +	} else if (attr == &dev_attr_fnode_entry_enable.attr) {
 +		param = ISCSI_FLASHNODE_ENTRY_EN;
 +	} else if (attr == &dev_attr_fnode_immediate_data.attr) {
 +		param = ISCSI_FLASHNODE_IMM_DATA_EN;
 +	} else if (attr == &dev_attr_fnode_initial_r2t.attr) {
 +		param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
 +	} else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
 +		param = ISCSI_FLASHNODE_DATASEQ_INORDER;
 +	} else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
 +		param = ISCSI_FLASHNODE_PDU_INORDER;
 +	} else if (attr == &dev_attr_fnode_chap_auth.attr) {
 +		param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
 +	} else if (attr == &dev_attr_fnode_discovery_logout.attr) {
 +		param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
 +	} else if (attr == &dev_attr_fnode_bidi_chap.attr) {
 +		param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
 +	} else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
 +		param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
 +	} else if (attr == &dev_attr_fnode_erl.attr) {
 +		param = ISCSI_FLASHNODE_ERL;
 +	} else if (attr == &dev_attr_fnode_first_burst_len.attr) {
 +		param = ISCSI_FLASHNODE_FIRST_BURST;
 +	} else if (attr == &dev_attr_fnode_def_time2wait.attr) {
 +		param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
 +	} else if (attr == &dev_attr_fnode_def_time2retain.attr) {
 +		param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
 +	} else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
 +		param = ISCSI_FLASHNODE_MAX_R2T;
 +	} else if (attr == &dev_attr_fnode_isid.attr) {
 +		param = ISCSI_FLASHNODE_ISID;
 +	} else if (attr == &dev_attr_fnode_tsid.attr) {
 +		param = ISCSI_FLASHNODE_TSID;
 +	} else if (attr == &dev_attr_fnode_max_burst_len.attr) {
 +		param = ISCSI_FLASHNODE_MAX_BURST;
 +	} else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
 +		param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
 +	} else if (attr == &dev_attr_fnode_targetalias.attr) {
 +		param = ISCSI_FLASHNODE_ALIAS;
 +	} else if (attr == &dev_attr_fnode_targetname.attr) {
 +		param = ISCSI_FLASHNODE_NAME;
 +	} else if (attr == &dev_attr_fnode_tpgt.attr) {
 +		param = ISCSI_FLASHNODE_TPGT;
 +	} else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
 +		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
 +	} else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
 +		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
 +	} else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
 +		param = ISCSI_FLASHNODE_CHAP_IN_IDX;
 +	} else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
 +		param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
 +	} else if (attr == &dev_attr_fnode_username.attr) {
 +		param = ISCSI_FLASHNODE_USERNAME;
 +	} else if (attr == &dev_attr_fnode_username_in.attr) {
 +		param = ISCSI_FLASHNODE_USERNAME_IN;
 +	} else if (attr == &dev_attr_fnode_password.attr) {
 +		param = ISCSI_FLASHNODE_PASSWORD;
 +	} else if (attr == &dev_attr_fnode_password_in.attr) {
 +		param = ISCSI_FLASHNODE_PASSWORD_IN;
 +	} else if (attr == &dev_attr_fnode_is_boot_target.attr) {
 +		param = ISCSI_FLASHNODE_IS_BOOT_TGT;
 +	} else {
 +		WARN_ONCE(1, "Invalid flashnode session attr");
 +		return 0;
 +	}
 +
 +	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
 +}
 +
 +static struct attribute_group iscsi_flashnode_sess_attr_group = {
 +	.attrs = iscsi_flashnode_sess_attrs,
 +	.is_visible = iscsi_flashnode_sess_attr_is_visible,
 +};
 +
 +static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
 +	&iscsi_flashnode_sess_attr_group,
 +	NULL,
 +};
 +
 +static void iscsi_flashnode_sess_release(struct device *dev)
 +{
 +	struct iscsi_bus_flash_session *fnode_sess =
 +						iscsi_dev_to_flash_session(dev);
 +
 +	kfree(fnode_sess->targetname);
 +	kfree(fnode_sess->targetalias);
 +	kfree(fnode_sess->portal_type);
 +	kfree(fnode_sess);
 +}
 +
 +struct device_type iscsi_flashnode_sess_dev_type = {
 +	.name = "iscsi_flashnode_sess_dev_type",
 +	.groups = iscsi_flashnode_sess_attr_groups,
 +	.release = iscsi_flashnode_sess_release,
 +};
 +
 +/* flash node connection attrs show */
 +#define iscsi_flashnode_conn_attr_show(type, name, param)		\
 +static ssize_t								\
 +show_##type##_##name(struct device *dev, struct device_attribute *attr,	\
 +		     char *buf)						\
 +{									\
 +	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
 +	struct iscsi_bus_flash_session *fnode_sess =			\
 +				iscsi_flash_conn_to_flash_session(fnode_conn);\
 +	struct iscsi_transport *t = fnode_conn->transport;		\
 +	return t->get_flashnode_param(fnode_sess, param, buf);		\
 +}									\
 +
 +
 +#define iscsi_flashnode_conn_attr(type, name, param)			\
 +	iscsi_flashnode_conn_attr_show(type, name, param)		\
 +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO,			\
 +			    show_##type##_##name, NULL);
 +
 +/* Flash node connection attributes */
 +
 +iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
 +			  ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
 +iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
 +iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
 +iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
 +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
 +			  ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
 +iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
 +			  ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
 +iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
 +			  ISCSI_FLASHNODE_TCP_WSF_DISABLE);
 +iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
 +			  ISCSI_FLASHNODE_TCP_TIMER_SCALE);
 +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
 +			  ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
 +iscsi_flashnode_conn_attr(fnode, fragment_disable,
 +			  ISCSI_FLASHNODE_IP_FRAG_DISABLE);
 +iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
 +iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
 +iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
 +iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
 +			  ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
 +iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
 +			  ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
 +iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
 +iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
 +iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
 +iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
 +			  ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
 +iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
 +			  ISCSI_FLASHNODE_REDIRECT_IPADDR);
 +iscsi_flashnode_conn_attr(fnode, max_segment_size,
 +			  ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
 +iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
 +			  ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
 +iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
 +iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
 +iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
 +iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
 +
 +static struct attribute *iscsi_flashnode_conn_attrs[] = {
 +	&dev_attr_fnode_is_fw_assigned_ipv6.attr,
 +	&dev_attr_fnode_header_digest.attr,
 +	&dev_attr_fnode_data_digest.attr,
 +	&dev_attr_fnode_snack_req.attr,
 +	&dev_attr_fnode_tcp_timestamp_stat.attr,
 +	&dev_attr_fnode_tcp_nagle_disable.attr,
 +	&dev_attr_fnode_tcp_wsf_disable.attr,
 +	&dev_attr_fnode_tcp_timer_scale.attr,
 +	&dev_attr_fnode_tcp_timestamp_enable.attr,
 +	&dev_attr_fnode_fragment_disable.attr,
 +	&dev_attr_fnode_max_recv_dlength.attr,
 +	&dev_attr_fnode_max_xmit_dlength.attr,
 +	&dev_attr_fnode_keepalive_tmo.attr,
 +	&dev_attr_fnode_port.attr,
 +	&dev_attr_fnode_ipaddress.attr,
 +	&dev_attr_fnode_redirect_ipaddr.attr,
 +	&dev_attr_fnode_max_segment_size.attr,
 +	&dev_attr_fnode_local_port.attr,
 +	&dev_attr_fnode_ipv4_tos.attr,
 +	&dev_attr_fnode_ipv6_traffic_class.attr,
 +	&dev_attr_fnode_ipv6_flow_label.attr,
 +	&dev_attr_fnode_link_local_ipv6.attr,
 +	&dev_attr_fnode_tcp_xmit_wsf.attr,
 +	&dev_attr_fnode_tcp_recv_wsf.attr,
 +	&dev_attr_fnode_statsn.attr,
 +	&dev_attr_fnode_exp_statsn.attr,
 +	NULL,
 +};
 +
 +static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
 +						    struct attribute *attr,
 +						    int i)
 +{
 +	struct device *dev = container_of(kobj, struct device, kobj);
 +	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
 +	struct iscsi_transport *t = fnode_conn->transport;
 +	int param;
 +
 +	if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
 +		param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
 +	} else if (attr == &dev_attr_fnode_header_digest.attr) {
 +		param = ISCSI_FLASHNODE_HDR_DGST_EN;
 +	} else if (attr == &dev_attr_fnode_data_digest.attr) {
 +		param = ISCSI_FLASHNODE_DATA_DGST_EN;
 +	} else if (attr == &dev_attr_fnode_snack_req.attr) {
 +		param = ISCSI_FLASHNODE_SNACK_REQ_EN;
 +	} else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
 +		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
 +	} else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
 +		param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
 +	} else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
 +		param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
 +	} else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
 +		param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
 +	} else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
 +		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
 +	} else if (attr == &dev_attr_fnode_fragment_disable.attr) {
 +		param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
 +	} else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
 +		param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
 +	} else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
 +		param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
 +	} else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
 +		param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
 +	} else if (attr == &dev_attr_fnode_port.attr) {
 +		param = ISCSI_FLASHNODE_PORT;
 +	} else if (attr == &dev_attr_fnode_ipaddress.attr) {
 +		param = ISCSI_FLASHNODE_IPADDR;
 +	} else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
 +		param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
 +	} else if (attr == &dev_attr_fnode_max_segment_size.attr) {
 +		param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
 +	} else if (attr == &dev_attr_fnode_local_port.attr) {
 +		param = ISCSI_FLASHNODE_LOCAL_PORT;
 +	} else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
 +		param = ISCSI_FLASHNODE_IPV4_TOS;
 +	} else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
 +		param = ISCSI_FLASHNODE_IPV6_TC;
 +	} else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
 +		param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
 +	} else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
 +		param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
 +	} else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
 +		param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
 +	} else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
 +		param = ISCSI_FLASHNODE_TCP_RECV_WSF;
 +	} else if (attr == &dev_attr_fnode_statsn.attr) {
 +		param = ISCSI_FLASHNODE_STATSN;
 +	} else if (attr == &dev_attr_fnode_exp_statsn.attr) {
 +		param = ISCSI_FLASHNODE_EXP_STATSN;
 +	} else {
 +		WARN_ONCE(1, "Invalid flashnode connection attr");
 +		return 0;
 +	}
 +
 +	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
 +}
 +
 +static struct attribute_group iscsi_flashnode_conn_attr_group = {
 +	.attrs = iscsi_flashnode_conn_attrs,
 +	.is_visible = iscsi_flashnode_conn_attr_is_visible,
 +};
 +
 +static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
 +	&iscsi_flashnode_conn_attr_group,
 +	NULL,
 +};
 +
 +static void iscsi_flashnode_conn_release(struct device *dev)
 +{
 +	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
 +
 +	kfree(fnode_conn->ipaddress);
 +	kfree(fnode_conn->redirect_ipaddr);
 +	kfree(fnode_conn->link_local_ipv6_addr);
 +	kfree(fnode_conn);
 +}
 +
 +struct device_type iscsi_flashnode_conn_dev_type = {
 +	.name = "iscsi_flashnode_conn_dev_type",
 +	.groups = iscsi_flashnode_conn_attr_groups,
 +	.release = iscsi_flashnode_conn_release,
 +};
 +
 +struct bus_type iscsi_flashnode_bus;
 +
 +int iscsi_flashnode_bus_match(struct device *dev,
 +				     struct device_driver *drv)
 +{
 +	if (dev->bus == &iscsi_flashnode_bus)
 +		return 1;
 +	return 0;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
 +
 +struct bus_type iscsi_flashnode_bus = {
 +	.name = "iscsi_flashnode",
 +	.match = &iscsi_flashnode_bus_match,
 +};
 +
 +/**
 + * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
 + * @shost: pointer to host data
 + * @index: index of flashnode to add in sysfs
 + * @transport: pointer to transport data
 + * @dd_size: total size to allocate
 + *
 + * Adds a sysfs entry for the flashnode session attributes
 + *
 + * Returns:
 + *  pointer to allocated flashnode sess on success
 + *  %NULL on failure
 + */
 +struct iscsi_bus_flash_session *
 +iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
 +			    struct iscsi_transport *transport,
 +			    int dd_size)
 +{
 +	struct iscsi_bus_flash_session *fnode_sess;
 +	int err;
 +
 +	fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
 +	if (!fnode_sess)
 +		return NULL;
 +
 +	fnode_sess->transport = transport;
 +	fnode_sess->target_id = index;
 +	fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
 +	fnode_sess->dev.bus = &iscsi_flashnode_bus;
 +	fnode_sess->dev.parent = &shost->shost_gendev;
 +	dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
 +		     shost->host_no, index);
 +
 +	err = device_register(&fnode_sess->dev);
 +	if (err)
 +		goto free_fnode_sess;
 +
 +	if (dd_size)
 +		fnode_sess->dd_data = &fnode_sess[1];
 +
 +	return fnode_sess;
 +
 +free_fnode_sess:
 +	kfree(fnode_sess);
 +	return NULL;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
 +
 +/**
 + * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
 + * @shost: pointer to host data
 + * @fnode_sess: pointer to the parent flashnode session entry
 + * @transport: pointer to transport data
 + * @dd_size: total size to allocate
 + *
 + * Adds a sysfs entry for the flashnode connection attributes
 + *
 + * Returns:
 + *  pointer to allocated flashnode conn on success
 + *  %NULL on failure
 + */
 +struct iscsi_bus_flash_conn *
 +iscsi_create_flashnode_conn(struct Scsi_Host *shost,
 +			    struct iscsi_bus_flash_session *fnode_sess,
 +			    struct iscsi_transport *transport,
 +			    int dd_size)
 +{
 +	struct iscsi_bus_flash_conn *fnode_conn;
 +	int err;
 +
 +	fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
 +	if (!fnode_conn)
 +		return NULL;
 +
 +	fnode_conn->transport = transport;
 +	fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
 +	fnode_conn->dev.bus = &iscsi_flashnode_bus;
 +	fnode_conn->dev.parent = &fnode_sess->dev;
 +	dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
 +		     shost->host_no, fnode_sess->target_id);
 +
 +	err = device_register(&fnode_conn->dev);
 +	if (err)
 +		goto free_fnode_conn;
 +
 +	if (dd_size)
 +		fnode_conn->dd_data = &fnode_conn[1];
 +
 +	return fnode_conn;
 +
 +free_fnode_conn:
 +	kfree(fnode_conn);
 +	return NULL;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
 +
 +/**
 + * iscsi_is_flashnode_conn_dev - verify whether passed device is a flashnode conn
 + * @dev: device to verify
 + * @data: pointer to data containing value to use for verification
 + *
 + * Verifies whether the passed device is a flashnode conn device
 + *
 + * Returns:
 + *  1 on success
 + *  0 on failure
 + */
 +int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
 +{
 +	return dev->bus == &iscsi_flashnode_bus;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
 +
 +static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
 +{
 +	device_unregister(&fnode_conn->dev);
 +	return 0;
 +}
 +
 +static int flashnode_match_index(struct device *dev, void *data)
 +{
 +	struct iscsi_bus_flash_session *fnode_sess = NULL;
 +	int ret = 0;
 +
 +	if (!iscsi_flashnode_bus_match(dev, NULL))
 +		goto exit_match_index;
 +
 +	fnode_sess = iscsi_dev_to_flash_session(dev);
 +	ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
 +
 +exit_match_index:
 +	return ret;
 +}
 +
 +/**
 + * iscsi_get_flashnode_by_index -finds flashnode session entry by index
 + * @shost: pointer to host data
 + * @data: pointer to data containing value to use for comparison
 + * @fn: function pointer that does actual comparison
 + *
 + * Finds the flashnode session object for the passed index
 + *
 + * Returns:
 + *  pointer to found flashnode session object on success
 + *  %NULL on failure
 + */
 +static struct iscsi_bus_flash_session *
 +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
 +			     int (*fn)(struct device *dev, void *data))
 +{
 +	struct iscsi_bus_flash_session *fnode_sess = NULL;
 +	struct device *dev;
 +
 +	dev = device_find_child(&shost->shost_gendev, data, fn);
 +	if (dev)
 +		fnode_sess = iscsi_dev_to_flash_session(dev);
 +
 +	return fnode_sess;
 +}
 +
 +/**
 + * iscsi_find_flashnode_sess - finds flashnode session entry
 + * @shost: pointer to host data
 + * @data: pointer to data containing value to use for comparison
 + * @fn: function pointer that does actual comparison
 + *
 + * Finds the flashnode session object comparing the data passed using logic
 + * defined in passed function pointer
 + *
 + * Returns:
 + *  pointer to found flashnode session device object on success
 + *  %NULL on failure
 + */
 +struct device *
 +iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
 +			  int (*fn)(struct device *dev, void *data))
 +{
 +	struct device *dev;
 +
 +	dev = device_find_child(&shost->shost_gendev, data, fn);
 +	return dev;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 +
 +/**
 + * iscsi_find_flashnode_conn - finds flashnode connection entry
 + * @fnode_sess: pointer to parent flashnode session entry
 + * @data: pointer to data containing value to use for comparison
 + * @fn: function pointer that does actual comparison
 + *
 + * Finds the flashnode connection object comparing the data passed using logic
 + * defined in passed function pointer
 + *
 + * Returns:
 + *  pointer to found flashnode connection device object on success
 + *  %NULL on failure
 + */
 +struct device *
 +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
 +			  void *data,
 +			  int (*fn)(struct device *dev, void *data))
 +{
 +	struct device *dev;
 +
 +	dev = device_find_child(&fnode_sess->dev, data, fn);
 +	return dev;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
 +
 +static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
 +{
 +	if (!iscsi_is_flashnode_conn_dev(dev, NULL))
 +		return 0;
 +
 +	return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
 +}
 +
 +/**
 + * iscsi_destroy_flashnode_sess - destroy flashnode session entry
 + * @fnode_sess: pointer to flashnode session entry to be destroyed
 + *
 + * Deletes the flashnode session entry and all children flashnode connection
 + * entries from sysfs
 + */
 +void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
 +{
 +	int err;
 +
 +	err = device_for_each_child(&fnode_sess->dev, NULL,
 +				    iscsi_iter_destroy_flashnode_conn_fn);
 +	if (err)
 +		pr_err("Could not delete all connections for %s. Error %d.\n",
 +		       fnode_sess->dev.kobj.name, err);
 +
 +	device_unregister(&fnode_sess->dev);
 +}
 +EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);
 +
 +static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
 +{
 +	if (!iscsi_flashnode_bus_match(dev, NULL))
 +		return 0;
 +
 +	iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
 +	return 0;
 +}
 +
 +/**
 + * iscsi_destroy_all_flashnode - destroy all flashnode session entries
 + * @shost: pointer to host data
 + *
 + * Destroys all the flashnode session entries and all corresponding children
 + * flashnode connection entries from sysfs
 + */
 +void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
 +{
 +	device_for_each_child(&shost->shost_gendev, NULL,
 +			      iscsi_iter_destroy_flashnode_fn);
 +}
 +EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
 +
 +/*
   * BSG support
   */
  /**
@@@ -2028,8 -1344,8 +2028,8 @@@ int iscsi_recv_pdu(struct iscsi_cls_con
  	struct iscsi_uevent *ev;
  	char *pdu;
  	struct iscsi_internal *priv;
- 	int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) +
- 			      data_size);
+ 	int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
+ 				   data_size);
  
  	priv = iscsi_if_transport_lookup(conn->transport);
  	if (!priv)
@@@ -2044,7 -1360,7 +2044,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	memset(ev, 0, sizeof(*ev));
  	ev->transport_handle = iscsi_handle(conn->transport);
  	ev->type = ISCSI_KEVENT_RECV_PDU;
@@@ -2065,7 -1381,7 +2065,7 @@@ int iscsi_offload_mesg(struct Scsi_Hos
  	struct nlmsghdr	*nlh;
  	struct sk_buff *skb;
  	struct iscsi_uevent *ev;
- 	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ 	int len = nlmsg_total_size(sizeof(*ev) + data_size);
  
  	skb = alloc_skb(len, GFP_ATOMIC);
  	if (!skb) {
@@@ -2074,7 -1390,7 +2074,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	memset(ev, 0, sizeof(*ev));
  	ev->type = type;
  	ev->transport_handle = iscsi_handle(transport);
@@@ -2099,7 -1415,7 +2099,7 @@@ void iscsi_conn_error_event(struct iscs
  	struct sk_buff	*skb;
  	struct iscsi_uevent *ev;
  	struct iscsi_internal *priv;
- 	int len = NLMSG_SPACE(sizeof(*ev));
+ 	int len = nlmsg_total_size(sizeof(*ev));
  
  	priv = iscsi_if_transport_lookup(conn->transport);
  	if (!priv)
@@@ -2113,7 -1429,7 +2113,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	ev->transport_handle = iscsi_handle(conn->transport);
  	ev->type = ISCSI_KEVENT_CONN_ERROR;
  	ev->r.connerror.error = error;
@@@ -2134,7 -1450,7 +2134,7 @@@ void iscsi_conn_login_event(struct iscs
  	struct sk_buff  *skb;
  	struct iscsi_uevent *ev;
  	struct iscsi_internal *priv;
- 	int len = NLMSG_SPACE(sizeof(*ev));
+ 	int len = nlmsg_total_size(sizeof(*ev));
  
  	priv = iscsi_if_transport_lookup(conn->transport);
  	if (!priv)
@@@ -2148,7 -1464,7 +2148,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	ev->transport_handle = iscsi_handle(conn->transport);
  	ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
  	ev->r.conn_login.state = state;
@@@ -2168,7 -1484,7 +2168,7 @@@ void iscsi_post_host_event(uint32_t hos
  	struct nlmsghdr *nlh;
  	struct sk_buff *skb;
  	struct iscsi_uevent *ev;
- 	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ 	int len = nlmsg_total_size(sizeof(*ev) + data_size);
  
  	skb = alloc_skb(len, GFP_NOIO);
  	if (!skb) {
@@@ -2178,7 -1494,7 +2178,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	ev->transport_handle = iscsi_handle(transport);
  	ev->type = ISCSI_KEVENT_HOST_EVENT;
  	ev->r.host_event.host_no = host_no;
@@@ -2199,7 -1515,7 +2199,7 @@@ void iscsi_ping_comp_event(uint32_t hos
  	struct nlmsghdr *nlh;
  	struct sk_buff *skb;
  	struct iscsi_uevent *ev;
- 	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ 	int len = nlmsg_total_size(sizeof(*ev) + data_size);
  
  	skb = alloc_skb(len, GFP_NOIO);
  	if (!skb) {
@@@ -2208,7 -1524,7 +2208,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	ev->transport_handle = iscsi_handle(transport);
  	ev->type = ISCSI_KEVENT_PING_COMP;
  	ev->r.ping_comp.host_no = host_no;
@@@ -2227,7 -1543,7 +2227,7 @@@ iscsi_if_send_reply(uint32_t group, in
  {
  	struct sk_buff	*skb;
  	struct nlmsghdr	*nlh;
- 	int len = NLMSG_SPACE(size);
+ 	int len = nlmsg_total_size(size);
  	int flags = multi ? NLM_F_MULTI : 0;
  	int t = done ? NLMSG_DONE : type;
  
@@@ -2239,24 -1555,24 +2239,24 @@@
  
  	nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
  	nlh->nlmsg_flags = flags;
- 	memcpy(NLMSG_DATA(nlh), payload, size);
+ 	memcpy(nlmsg_data(nlh), payload, size);
  	return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
  }
  
  static int
  iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
  {
- 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
  	struct iscsi_stats *stats;
  	struct sk_buff *skbstat;
  	struct iscsi_cls_conn *conn;
  	struct nlmsghdr	*nlhstat;
  	struct iscsi_uevent *evstat;
  	struct iscsi_internal *priv;
- 	int len = NLMSG_SPACE(sizeof(*ev) +
- 			      sizeof(struct iscsi_stats) +
- 			      sizeof(struct iscsi_stats_custom) *
- 			      ISCSI_STATS_CUSTOM_MAX);
+ 	int len = nlmsg_total_size(sizeof(*ev) +
+ 				   sizeof(struct iscsi_stats) +
+ 				   sizeof(struct iscsi_stats_custom) *
+ 				   ISCSI_STATS_CUSTOM_MAX);
  	int err = 0;
  
  	priv = iscsi_if_transport_lookup(transport);
@@@ -2279,7 -1595,7 +2279,7 @@@
  
  		nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
  				      (len - sizeof(*nlhstat)), 0);
- 		evstat = NLMSG_DATA(nlhstat);
+ 		evstat = nlmsg_data(nlhstat);
  		memset(evstat, 0, sizeof(*evstat));
  		evstat->transport_handle = iscsi_handle(conn->transport);
  		evstat->type = nlh->nlmsg_type;
@@@ -2292,12 -1608,12 +2292,12 @@@
  		memset(stats, 0, sizeof(*stats));
  
  		transport->get_stats(conn, stats);
- 		actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) +
- 					  sizeof(struct iscsi_stats) +
- 					  sizeof(struct iscsi_stats_custom) *
- 					  stats->custom_length);
+ 		actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
+ 					       sizeof(struct iscsi_stats) +
+ 					       sizeof(struct iscsi_stats_custom) *
+ 					       stats->custom_length);
  		actual_size -= sizeof(*nlhstat);
- 		actual_size = NLMSG_LENGTH(actual_size);
+ 		actual_size = nlmsg_msg_size(actual_size);
  		skb_trim(skbstat, NLMSG_ALIGN(actual_size));
  		nlhstat->nlmsg_len = actual_size;
  
@@@ -2321,7 -1637,7 +2321,7 @@@ int iscsi_session_event(struct iscsi_cl
  	struct iscsi_uevent *ev;
  	struct sk_buff  *skb;
  	struct nlmsghdr *nlh;
- 	int rc, len = NLMSG_SPACE(sizeof(*ev));
+ 	int rc, len = nlmsg_total_size(sizeof(*ev));
  
  	priv = iscsi_if_transport_lookup(session->transport);
  	if (!priv)
@@@ -2337,7 -1653,7 +2337,7 @@@
  	}
  
  	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- 	ev = NLMSG_DATA(nlh);
+ 	ev = nlmsg_data(nlh);
  	ev->transport_handle = iscsi_handle(session->transport);
  
  	ev->type = event;
@@@ -2689,7 -2005,7 +2689,7 @@@ iscsi_send_ping(struct iscsi_transport 
  static int
  iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
  {
- 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
  	struct Scsi_Host *shost = NULL;
  	struct iscsi_chap_rec *chap_rec;
  	struct iscsi_internal *priv;
@@@ -2708,7 -2024,7 +2708,7 @@@
  		return -EINVAL;
  
  	chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
- 	len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+ 	len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
  
  	shost = scsi_host_lookup(ev->u.get_chap.host_no);
  	if (!shost) {
@@@ -2729,7 -2045,7 +2729,7 @@@
  
  		nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
  				      (len - sizeof(*nlhchap)), 0);
- 		evchap = NLMSG_DATA(nlhchap);
+ 		evchap = nlmsg_data(nlhchap);
  		memset(evchap, 0, sizeof(*evchap));
  		evchap->transport_handle = iscsi_handle(transport);
  		evchap->type = nlh->nlmsg_type;
@@@ -2742,7 -2058,7 +2742,7 @@@
  		err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
  				    &evchap->u.get_chap.num_entries, buf);
  
- 		actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+ 		actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
  		skb_trim(skbchap, NLMSG_ALIGN(actual_size));
  		nlhchap->nlmsg_len = actual_size;
  
@@@ -2776,299 -2092,11 +2776,299 @@@ static int iscsi_delete_chap(struct isc
  	return err;
  }
  
 +static const struct {
 +	enum iscsi_discovery_parent_type value;
 +	char				*name;
 +} iscsi_discovery_parent_names[] = {
 +	{ISCSI_DISC_PARENT_UNKNOWN,	"Unknown" },
 +	{ISCSI_DISC_PARENT_SENDTGT,	"Sendtarget" },
 +	{ISCSI_DISC_PARENT_ISNS,	"isns" },
 +};
 +
 +char *iscsi_get_discovery_parent_name(int parent_type)
 +{
 +	int i;
 +	char *state = "Unknown!";
 +
 +	for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
 +		if (iscsi_discovery_parent_names[i].value & parent_type) {
 +			state = iscsi_discovery_parent_names[i].name;
 +			break;
 +		}
 +	}
 +	return state;
 +}
 +EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
 +
 +static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
 +				     struct iscsi_uevent *ev, uint32_t len)
 +{
 +	char *data = (char *)ev + sizeof(*ev);
 +	struct Scsi_Host *shost;
 +	struct iscsi_bus_flash_session *fnode_sess;
 +	struct iscsi_bus_flash_conn *fnode_conn;
 +	struct device *dev;
 +	uint32_t *idx;
 +	int err = 0;
 +
 +	if (!transport->set_flashnode_param) {
 +		err = -ENOSYS;
 +		goto exit_set_fnode;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.set_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	idx = &ev->u.set_flashnode.flashnode_idx;
 +	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
 +						  flashnode_match_index);
 +	if (!fnode_sess) {
 +		pr_err("%s could not find flashnode %u for host no %u\n",
 +		       __func__, *idx, ev->u.set_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
 +					iscsi_is_flashnode_conn_dev);
 +	if (!dev) {
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	fnode_conn = iscsi_dev_to_flash_conn(dev);
 +	err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_set_fnode:
 +	return err;
 +}
 +
 +static int iscsi_new_flashnode(struct iscsi_transport *transport,
 +			       struct iscsi_uevent *ev, uint32_t len)
 +{
 +	char *data = (char *)ev + sizeof(*ev);
 +	struct Scsi_Host *shost;
 +	int index;
 +	int err = 0;
 +
 +	if (!transport->new_flashnode) {
 +		err = -ENOSYS;
 +		goto exit_new_fnode;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.new_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	index = transport->new_flashnode(shost, data, len);
 +
 +	if (index >= 0)
 +		ev->r.new_flashnode_ret.flashnode_idx = index;
 +	else
 +		err = -EIO;
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_new_fnode:
 +	return err;
 +}
 +
 +static int iscsi_del_flashnode(struct iscsi_transport *transport,
 +			       struct iscsi_uevent *ev)
 +{
 +	struct Scsi_Host *shost;
 +	struct iscsi_bus_flash_session *fnode_sess;
 +	uint32_t *idx;
 +	int err = 0;
 +
 +	if (!transport->del_flashnode) {
 +		err = -ENOSYS;
 +		goto exit_del_fnode;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.del_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	idx = &ev->u.del_flashnode.flashnode_idx;
 +	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
 +						  flashnode_match_index);
 +	if (!fnode_sess) {
 +		pr_err("%s could not find flashnode %u for host no %u\n",
 +		       __func__, *idx, ev->u.del_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	err = transport->del_flashnode(fnode_sess);
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_del_fnode:
 +	return err;
 +}
 +
 +static int iscsi_login_flashnode(struct iscsi_transport *transport,
 +				 struct iscsi_uevent *ev)
 +{
 +	struct Scsi_Host *shost;
 +	struct iscsi_bus_flash_session *fnode_sess;
 +	struct iscsi_bus_flash_conn *fnode_conn;
 +	struct device *dev;
 +	uint32_t *idx;
 +	int err = 0;
 +
 +	if (!transport->login_flashnode) {
 +		err = -ENOSYS;
 +		goto exit_login_fnode;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.login_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	idx = &ev->u.login_flashnode.flashnode_idx;
 +	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
 +						  flashnode_match_index);
 +	if (!fnode_sess) {
 +		pr_err("%s could not find flashnode %u for host no %u\n",
 +		       __func__, *idx, ev->u.login_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
 +					iscsi_is_flashnode_conn_dev);
 +	if (!dev) {
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	fnode_conn = iscsi_dev_to_flash_conn(dev);
 +	err = transport->login_flashnode(fnode_sess, fnode_conn);
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_login_fnode:
 +	return err;
 +}
 +
 +static int iscsi_logout_flashnode(struct iscsi_transport *transport,
 +				  struct iscsi_uevent *ev)
 +{
 +	struct Scsi_Host *shost;
 +	struct iscsi_bus_flash_session *fnode_sess;
 +	struct iscsi_bus_flash_conn *fnode_conn;
 +	struct device *dev;
 +	uint32_t *idx;
 +	int err = 0;
 +
 +	if (!transport->logout_flashnode) {
 +		err = -ENOSYS;
 +		goto exit_logout_fnode;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.logout_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	idx = &ev->u.logout_flashnode.flashnode_idx;
 +	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
 +						  flashnode_match_index);
 +	if (!fnode_sess) {
 +		pr_err("%s could not find flashnode %u for host no %u\n",
 +		       __func__, *idx, ev->u.logout_flashnode.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
 +					iscsi_is_flashnode_conn_dev);
 +	if (!dev) {
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	fnode_conn = iscsi_dev_to_flash_conn(dev);
 +
 +	err = transport->logout_flashnode(fnode_sess, fnode_conn);
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_logout_fnode:
 +	return err;
 +}
 +
 +static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
 +				      struct iscsi_uevent *ev)
 +{
 +	struct Scsi_Host *shost;
 +	struct iscsi_cls_session *session;
 +	int err = 0;
 +
 +	if (!transport->logout_flashnode_sid) {
 +		err = -ENOSYS;
 +		goto exit_logout_sid;
 +	}
 +
 +	shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
 +	if (!shost) {
 +		pr_err("%s could not find host no %u\n",
 +		       __func__, ev->u.logout_flashnode_sid.host_no);
 +		err = -ENODEV;
 +		goto put_host;
 +	}
 +
 +	session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
 +	if (!session) {
 +		pr_err("%s could not find session id %u\n",
 +		       __func__, ev->u.logout_flashnode_sid.sid);
 +		err = -EINVAL;
 +		goto put_host;
 +	}
 +
 +	err = transport->logout_flashnode_sid(session);
 +
 +put_host:
 +	scsi_host_put(shost);
 +
 +exit_logout_sid:
 +	return err;
 +}
 +
  static int
  iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
  {
  	int err = 0;
- 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
  	struct iscsi_transport *transport = NULL;
  	struct iscsi_internal *priv;
  	struct iscsi_cls_session *session;
@@@ -3218,27 -2246,6 +3218,27 @@@
  	case ISCSI_UEVENT_DELETE_CHAP:
  		err = iscsi_delete_chap(transport, ev);
  		break;
 +	case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
 +		err = iscsi_set_flashnode_param(transport, ev,
 +						nlmsg_attrlen(nlh,
 +							      sizeof(*ev)));
 +		break;
 +	case ISCSI_UEVENT_NEW_FLASHNODE:
 +		err = iscsi_new_flashnode(transport, ev,
 +					  nlmsg_attrlen(nlh, sizeof(*ev)));
 +		break;
 +	case ISCSI_UEVENT_DEL_FLASHNODE:
 +		err = iscsi_del_flashnode(transport, ev);
 +		break;
 +	case ISCSI_UEVENT_LOGIN_FLASHNODE:
 +		err = iscsi_login_flashnode(transport, ev);
 +		break;
 +	case ISCSI_UEVENT_LOGOUT_FLASHNODE:
 +		err = iscsi_logout_flashnode(transport, ev);
 +		break;
 +	case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
 +		err = iscsi_logout_flashnode_sid(transport, ev);
 +		break;
  	default:
  		err = -ENOSYS;
  		break;
@@@ -3256,7 -2263,7 +3256,7 @@@ static voi
  iscsi_if_rx(struct sk_buff *skb)
  {
  	mutex_lock(&rx_queue_mutex);
- 	while (skb->len >= NLMSG_SPACE(0)) {
+ 	while (skb->len >= NLMSG_HDRLEN) {
  		int err;
  		uint32_t rlen;
  		struct nlmsghdr	*nlh;
@@@ -3269,7 -2276,7 +3269,7 @@@
  			break;
  		}
  
- 		ev = NLMSG_DATA(nlh);
+ 		ev = nlmsg_data(nlh);
  		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
  		if (rlen > skb->len)
  			rlen = skb->len;
@@@ -3974,14 -2981,10 +3974,14 @@@ static __init int iscsi_transport_init(
  	if (err)
  		goto unregister_conn_class;
  
 +	err = bus_register(&iscsi_flashnode_bus);
 +	if (err)
 +		goto unregister_session_class;
 +
  	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
  	if (!nls) {
  		err = -ENOBUFS;
 -		goto unregister_session_class;
 +		goto unregister_flashnode_bus;
  	}
  
  	iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
@@@ -3992,8 -2995,6 +3992,8 @@@
  
  release_nls:
  	netlink_kernel_release(nls);
 +unregister_flashnode_bus:
 +	bus_unregister(&iscsi_flashnode_bus);
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
@@@ -4013,7 -3014,6 +4013,7 @@@ static void __exit iscsi_transport_exit
  {
  	destroy_workqueue(iscsi_eh_timer_workq);
  	netlink_kernel_release(nls);
 +	bus_unregister(&iscsi_flashnode_bus);
  	transport_class_unregister(&iscsi_connection_class);
  	transport_class_unregister(&iscsi_session_class);
  	transport_class_unregister(&iscsi_host_class);
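
For reference, the scsi_transport_iscsi.c conversion above follows the usual mapping from the legacy netlink macros onto the inline helpers in <net/netlink.h>. A minimal sketch of that mapping (illustrative only, not part of the merged commits; example_build_event() is a made-up name):

	/* NLMSG_SPACE(len)  -> nlmsg_total_size(len)  header + payload, aligned
	 * NLMSG_LENGTH(len) -> nlmsg_msg_size(len)    header + payload, unaligned
	 * NLMSG_DATA(nlh)   -> nlmsg_data(nlh)
	 * skb->len >= NLMSG_SPACE(0) -> skb->len >= NLMSG_HDRLEN
	 */
	static struct sk_buff *example_build_event(int payload)
	{
		/* nlmsg_new() reserves nlmsg_total_size(payload) bytes,
		 * replacing alloc_skb(NLMSG_SPACE(payload), ...) */
		struct sk_buff *skb = nlmsg_new(payload, GFP_KERNEL);
		struct nlmsghdr *nlh;

		if (!skb)
			return NULL;

		nlh = __nlmsg_put(skb, 0, 0, 0, payload, 0);
		memset(nlmsg_data(nlh), 0, payload);	/* was NLMSG_DATA(nlh) */
		return skb;
	}
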
diff --combined drivers/staging/gdm72xx/netlink_k.c
index 8a92605,c1239aa..af7f1c1
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@@ -15,10 -15,9 +15,10 @@@
  
  #include <linux/module.h>
  #include <linux/etherdevice.h>
- #include <linux/netlink.h>
+ #include <net/netlink.h>
  #include <asm/byteorder.h>
  #include <net/sock.h>
 +#include "netlink_k.h"
  
  #if !defined(NLMSG_HDRLEN)
  #define NLMSG_HDRLEN	 ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
@@@ -26,12 -25,12 +26,12 @@@
  
  #define ND_MAX_GROUP			30
  #define ND_IFINDEX_LEN			sizeof(int)
- #define ND_NLMSG_SPACE(len)		(NLMSG_SPACE(len) + ND_IFINDEX_LEN)
+ #define ND_NLMSG_SPACE(len)		(nlmsg_total_size(len) + ND_IFINDEX_LEN)
  #define ND_NLMSG_DATA(nlh) \
- 	((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+ 	((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
  #define ND_NLMSG_S_LEN(len)		(len+ND_IFINDEX_LEN)
  #define ND_NLMSG_R_LEN(nlh)		(nlh->nlmsg_len-ND_IFINDEX_LEN)
- #define ND_NLMSG_IFIDX(nlh)		NLMSG_DATA(nlh)
+ #define ND_NLMSG_IFIDX(nlh)		nlmsg_data(nlh)
  #define ND_MAX_MSG_LEN			8096
  
  #if defined(DEFINE_MUTEX)
@@@ -52,7 -51,7 +52,7 @@@ static void netlink_rcv_cb(struct sk_bu
  	void *msg;
  	int ifindex;
  
- 	if (skb->len >= NLMSG_SPACE(0)) {
+ 	if (skb->len >= NLMSG_HDRLEN) {
  		nlh = (struct nlmsghdr *)skb->data;
  
  		if (skb->len < nlh->nlmsg_len ||
@@@ -125,7 -124,7 +125,7 @@@ int netlink_send(struct sock *sock, in
  		return -EINVAL;
  	}
  
- 	skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
+ 	skb = nlmsg_new(len, GFP_ATOMIC);
  	if (!skb) {
  		pr_err("netlink_broadcast ret=%d\n", ret);
  		return -ENOMEM;
diff --combined include/linux/mlx4/device.h
index ad4a53f,53acaf6..a51b013
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@@ -40,6 -40,8 +40,8 @@@
  
  #include <linux/atomic.h>
  
+ #include <linux/clocksource.h>
+ 
  #define MAX_MSIX_P_PORT		17
  #define MAX_MSIX		64
  #define MSIX_LEGACY_SZ		4
@@@ -140,6 -142,7 +142,7 @@@ enum 
  	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
  	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
  	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
+ 	MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
  	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
  	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
  	MLX4_DEV_CAP_FLAG_64B_EQE	= 1LL << 61,
@@@ -151,7 -154,10 +154,10 @@@ enum 
  	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
  	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
  	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3,
- 	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL <<  4
+ 	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL <<  4,
+ 	MLX4_DEV_CAP_FLAG2_TS			= 1LL <<  5,
+ 	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL		= 1LL <<  6,
+ 	MLX4_DEV_CAP_FLAG2_FSM			= 1LL <<  7
  };
  
  enum {
@@@ -443,6 -449,7 +449,7 @@@ struct mlx4_caps 
  	u8			eqe_factor;
  	u32			userspace_caps; /* userspace must be aware of these */
  	u32			function_caps;  /* VFs must be aware of these */
+ 	u16			hca_core_clock;
  };
  
  struct mlx4_buf_list {
@@@ -837,7 -844,7 +844,7 @@@ void mlx4_free_hwq_res(struct mlx4_dev 
  
  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
  		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- 		  unsigned vector, int collapsed);
+ 		  unsigned vector, int collapsed, int timestamp_en);
  void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
  
  int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
@@@ -896,12 -903,11 +903,12 @@@ static inline int map_hw_to_sw_id(u16 h
  }
  
  enum mlx4_net_trans_promisc_mode {
 -	MLX4_FS_PROMISC_NONE = 0,
 -	MLX4_FS_PROMISC_UPLINK,
 -	/* For future use. Not implemented yet */
 -	MLX4_FS_PROMISC_FUNCTION_PORT,
 -	MLX4_FS_PROMISC_ALL_MULTI,
 +	MLX4_FS_REGULAR = 1,
 +	MLX4_FS_ALL_DEFAULT,
 +	MLX4_FS_MC_DEFAULT,
 +	MLX4_FS_UC_SNIFFER,
 +	MLX4_FS_MC_SNIFFER,
 +	MLX4_FS_MODE_NUM, /* should be last */
  };
  
  struct mlx4_spec_eth {
@@@ -930,7 -936,7 +937,7 @@@ struct mlx4_spec_ipv4 
  };
  
  struct mlx4_spec_ib {
 -	__be32	r_qpn;
 +	__be32  l3_qpn;
  	__be32	qpn_msk;
  	u8	dst_gid[16];
  	u8	dst_gid_msk[16];
@@@ -963,92 -969,6 +970,92 @@@ struct mlx4_net_trans_rule 
  	u32	qpn;
  };
  
 +struct mlx4_net_trans_rule_hw_ctrl {
 +	__be16 prio;
 +	u8 type;
 +	u8 flags;
 +	u8 rsvd1;
 +	u8 funcid;
 +	u8 vep;
 +	u8 port;
 +	__be32 qpn;
 +	__be32 rsvd2;
 +};
 +
 +struct mlx4_net_trans_rule_hw_ib {
 +	u8 size;
 +	u8 rsvd1;
 +	__be16 id;
 +	u32 rsvd2;
 +	__be32 l3_qpn;
 +	__be32 qpn_mask;
 +	u8 dst_gid[16];
 +	u8 dst_gid_msk[16];
 +} __packed;
 +
 +struct mlx4_net_trans_rule_hw_eth {
 +	u8	size;
 +	u8	rsvd;
 +	__be16	id;
 +	u8	rsvd1[6];
 +	u8	dst_mac[6];
 +	u16	rsvd2;
 +	u8	dst_mac_msk[6];
 +	u16	rsvd3;
 +	u8	src_mac[6];
 +	u16	rsvd4;
 +	u8	src_mac_msk[6];
 +	u8      rsvd5;
 +	u8      ether_type_enable;
 +	__be16  ether_type;
 +	__be16  vlan_tag_msk;
 +	__be16  vlan_tag;
 +} __packed;
 +
 +struct mlx4_net_trans_rule_hw_tcp_udp {
 +	u8	size;
 +	u8	rsvd;
 +	__be16	id;
 +	__be16	rsvd1[3];
 +	__be16	dst_port;
 +	__be16	rsvd2;
 +	__be16	dst_port_msk;
 +	__be16	rsvd3;
 +	__be16	src_port;
 +	__be16	rsvd4;
 +	__be16	src_port_msk;
 +} __packed;
 +
 +struct mlx4_net_trans_rule_hw_ipv4 {
 +	u8	size;
 +	u8	rsvd;
 +	__be16	id;
 +	__be32	rsvd1;
 +	__be32	dst_ip;
 +	__be32	dst_ip_msk;
 +	__be32	src_ip;
 +	__be32	src_ip_msk;
 +} __packed;
 +
 +struct _rule_hw {
 +	union {
 +		struct {
 +			u8 size;
 +			u8 rsvd;
 +			__be16 id;
 +		};
 +		struct mlx4_net_trans_rule_hw_eth eth;
 +		struct mlx4_net_trans_rule_hw_ib ib;
 +		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
 +		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
 +	};
 +};
 +
 +/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
 +struct mlx4_flow_handle {
 +	u64 reg_id[2];
 +};
 +
  int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
  				enum mlx4_net_trans_promisc_mode mode);
  int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@@ -1098,11 -1018,6 +1105,11 @@@ void mlx4_counter_free(struct mlx4_dev 
  int mlx4_flow_attach(struct mlx4_dev *dev,
  		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
  int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
 +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
 +				    enum mlx4_net_trans_promisc_mode flow_type);
 +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 +				  enum mlx4_net_trans_rule_id id);
 +int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
  
  void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
  			  int i, int val);
@@@ -1120,4 -1035,6 +1127,6 @@@ int set_and_calc_slave_port_state(struc
  void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
  __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
  
+ cycle_t mlx4_read_clock(struct mlx4_dev *dev);
+ 
  #endif /* MLX4_DEVICE_H */
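
The mlx4 changes above add a timestamping capability bit (MLX4_DEV_CAP_FLAG2_TS), extend mlx4_cq_alloc() with a timestamp_en argument and introduce mlx4_read_clock(). A hypothetical caller sketch (example_create_cq() is not a real function) would gate the new argument on the capability:

	static int example_create_cq(struct mlx4_dev *dev, int nent,
				     struct mlx4_mtt *mtt, struct mlx4_uar *uar,
				     u64 db_rec, struct mlx4_cq *cq,
				     unsigned vector)
	{
		/* enable HW timestamps only when the device advertises them */
		int timestamp_en = !!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS);

		return mlx4_cq_alloc(dev, nent, mtt, uar, db_rec, cq, vector,
				     0 /* collapsed */, timestamp_en);
	}
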
diff --combined include/linux/netdevice.h
index 2faf6c0,f8898a4..a94a5a0
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@@ -144,8 -144,6 +144,6 @@@ static inline bool dev_xmit_complete(in
  # else
  #  define LL_MAX_HEADER 96
  # endif
- #elif IS_ENABLED(CONFIG_TR)
- # define LL_MAX_HEADER 48
  #else
  # define LL_MAX_HEADER 32
  #endif
@@@ -211,6 -209,7 +209,7 @@@ struct netdev_hw_addr 
  #define NETDEV_HW_ADDR_T_UNICAST	4
  #define NETDEV_HW_ADDR_T_MULTICAST	5
  	bool			global_use;
+ 	int			sync_cnt;
  	int			refcount;
  	int			synced;
  	struct rcu_head		rcu_head;
@@@ -594,6 -593,7 +593,6 @@@ struct rps_dev_flow 
  struct rps_dev_flow_table {
  	unsigned int mask;
  	struct rcu_head rcu;
 -	struct work_struct free_work;
  	struct rps_dev_flow flows[0];
  };
  #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
@@@ -784,13 -784,13 +783,13 @@@ struct netdev_fcoe_hbainfo 
   *	3. Update dev->stats asynchronously and atomically, and define
   *	   neither operation.
   *
-  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
-  *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
-  *	this function is called when a VLAN id is registered.
+  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
+  *	If device supports VLAN filtering this function is called when a
+  *	VLAN id is registered.
   *
   * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
-  *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
-  *	this function is called when a VLAN id is unregistered.
+  *	If device supports VLAN filtering this function is called when a
+  *	VLAN id is unregistered.
   *
   * void (*ndo_poll_controller)(struct net_device *dev);
   *
@@@ -934,9 -934,9 +933,9 @@@ struct net_device_ops 
  	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  
  	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
- 						       unsigned short vid);
+ 						       __be16 proto, u16 vid);
  	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
- 						        unsigned short vid);
+ 						        __be16 proto, u16 vid);
  #ifdef CONFIG_NET_POLL_CONTROLLER
  	void                    (*ndo_poll_controller)(struct net_device *dev);
  	int			(*ndo_netpoll_setup)(struct net_device *dev,
@@@ -1072,6 -1072,8 +1071,8 @@@ struct net_device 
  	struct list_head	dev_list;
  	struct list_head	napi_list;
  	struct list_head	unreg_list;
+ 	struct list_head	upper_dev_list; /* List of upper devices */
+ 
  
  	/* currently active device features */
  	netdev_features_t	features;
@@@ -1144,6 -1146,13 +1145,13 @@@
  	spinlock_t		addr_list_lock;
  	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
  	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
+ 	struct netdev_hw_addr_list	dev_addrs; /* list of device
+ 						    * hw addresses
+ 						    */
+ #ifdef CONFIG_SYSFS
+ 	struct kset		*queues_kset;
+ #endif
+ 
  	bool			uc_promisc;
  	unsigned int		promiscuity;
  	unsigned int		allmulti;
@@@ -1176,21 -1185,11 +1184,11 @@@
  						 * avoid dirtying this cache line.
  						 */
  
- 	struct list_head	upper_dev_list; /* List of upper devices */
- 
  	/* Interface address info used in eth_type_trans() */
  	unsigned char		*dev_addr;	/* hw address, (before bcast
  						   because most packets are
  						   unicast) */
  
- 	struct netdev_hw_addr_list	dev_addrs; /* list of device
- 						      hw addresses */
- 
- 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
- 
- #ifdef CONFIG_SYSFS
- 	struct kset		*queues_kset;
- #endif
  
  #ifdef CONFIG_RPS
  	struct netdev_rx_queue	*_rx;
@@@ -1201,18 -1200,14 +1199,14 @@@
  	/* Number of RX queues currently active in device */
  	unsigned int		real_num_rx_queues;
  
- #ifdef CONFIG_RFS_ACCEL
- 	/* CPU reverse-mapping for RX completion interrupts, indexed
- 	 * by RX queue number.  Assigned by driver.  This must only be
- 	 * set if the ndo_rx_flow_steer operation is defined. */
- 	struct cpu_rmap		*rx_cpu_rmap;
- #endif
  #endif
  
  	rx_handler_func_t __rcu	*rx_handler;
  	void __rcu		*rx_handler_data;
  
  	struct netdev_queue __rcu *ingress_queue;
+ 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
+ 
  
  /*
   * Cache lines mostly used on transmit path
@@@ -1234,6 -1229,12 +1228,12 @@@
  #ifdef CONFIG_XPS
  	struct xps_dev_maps __rcu *xps_maps;
  #endif
+ #ifdef CONFIG_RFS_ACCEL
+ 	/* CPU reverse-mapping for RX completion interrupts, indexed
+ 	 * by RX queue number.  Assigned by driver.  This must only be
+ 	 * set if the ndo_rx_flow_steer operation is defined. */
+ 	struct cpu_rmap		*rx_cpu_rmap;
+ #endif
  
  	/* These may be needed for future network-power-down code. */
  
@@@ -1474,6 -1475,11 +1474,11 @@@ static inline void *netdev_priv(const s
   */
  #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
  
+ /* Default NAPI poll() weight
+  * Device drivers are strongly advised not to use a bigger value.
+  */
+ #define NAPI_POLL_WEIGHT 64
+ 
  /**
   *	netif_napi_add - initialize a napi context
   *	@dev:  network device
@@@ -1611,6 -1617,9 +1616,9 @@@ extern seqcount_t	devnet_rename_seq;	/
  		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  #define for_each_netdev_continue_rcu(net, d)		\
  	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
+ #define for_each_netdev_in_bond_rcu(bond, slave)	\
+ 		for_each_netdev_rcu(&init_net, slave)	\
+ 			if (netdev_master_upper_dev_get_rcu(slave) == bond)
  #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
  
  static inline struct net_device *next_net_device(struct net_device *dev)
@@@ -1683,7 -1692,6 +1691,6 @@@ extern int 		netdev_refcnt_read(const s
  extern void		free_netdev(struct net_device *dev);
  extern void		synchronize_net(void);
  extern int		init_dummy_netdev(struct net_device *dev);
- extern void		netdev_resync_ops(struct net_device *dev);
  
  extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
  extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
@@@ -2620,6 -2628,7 +2627,7 @@@ extern int dev_uc_add(struct net_devic
  extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
  extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
  extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+ extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
  extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
  extern void dev_uc_flush(struct net_device *dev);
  extern void dev_uc_init(struct net_device *dev);
@@@ -2631,6 -2640,7 +2639,7 @@@ extern int dev_mc_add_excl(struct net_d
  extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
  extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
  extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+ extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
  extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  extern void dev_mc_flush(struct net_device *dev);
  extern void dev_mc_init(struct net_device *dev);
@@@ -2677,6 -2687,19 +2686,19 @@@ struct sk_buff *skb_gso_segment(struct 
  {
  	return __skb_gso_segment(skb, features, true);
  }
+ __be16 skb_network_protocol(struct sk_buff *skb);
+ 
+ static inline bool can_checksum_protocol(netdev_features_t features,
+ 					 __be16 protocol)
+ {
+ 	return ((features & NETIF_F_GEN_CSUM) ||
+ 		((features & NETIF_F_V4_CSUM) &&
+ 		 protocol == htons(ETH_P_IP)) ||
+ 		((features & NETIF_F_V6_CSUM) &&
+ 		 protocol == htons(ETH_P_IPV6)) ||
+ 		((features & NETIF_F_FCOE_CRC) &&
+ 		 protocol == htons(ETH_P_FCOE)));
+ }
  
  #ifdef CONFIG_BUG
  extern void netdev_rx_csum_fault(struct net_device *dev);
@@@ -2755,6 -2778,11 +2777,11 @@@ static inline void netif_set_gso_max_si
  	dev->gso_max_size = size;
  }
  
+ static inline bool netif_is_bond_master(struct net_device *dev)
+ {
+ 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
+ }
+ 
  static inline bool netif_is_bond_slave(struct net_device *dev)
  {
  	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
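
Among the netdevice.h additions above, NAPI_POLL_WEIGHT gives drivers a shared default poll budget. A short sketch of how a driver might adopt it (example_setup_napi() is hypothetical, not from the merged commits):

	static void example_setup_napi(struct net_device *dev,
				       struct napi_struct *napi,
				       int (*poll)(struct napi_struct *, int))
	{
		/* use the common default instead of a driver-private weight */
		netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
	}
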
diff --combined include/linux/pci.h
index e73dfa3,43e45ac..3a24e4f
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@@ -35,21 -35,6 +35,21 @@@
  /* Include the ID list */
  #include <linux/pci_ids.h>
  
 +/*
 + * The PCI interface treats multi-function devices as independent
 + * devices.  The slot/function address of each device is encoded
 + * in a single byte as follows:
 + *
 + *	7:3 = slot
 + *	2:0 = function
 + * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 + * In the interest of not exposing interfaces to user-space unnecessarily,
 + * the following kernel-only defines are being added here.
 + */
 +#define PCI_DEVID(bus, devfn)  ((((u16)bus) << 8) | devfn)
 +/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
 +#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
 +
  /* pci_slot represents a physical slot */
  struct pci_slot {
  	struct pci_bus *bus;		/* The bus this slot is on */
@@@ -247,8 -232,6 +247,8 @@@ struct pci_dev 
  	u8		revision;	/* PCI revision, low byte of class word */
  	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
  	u8		pcie_cap;	/* PCI-E capability offset */
 +	u8		msi_cap;	/* MSI capability offset */
 +	u8		msix_cap;	/* MSI-X capability offset */
  	u8		pcie_mpss:3;	/* PCI-E Max Payload Size Supported */
  	u8		rom_base_reg;	/* which config register controls the ROM */
  	u8		pin;  		/* which interrupt pin this device uses */
@@@ -266,7 -249,8 +266,7 @@@
  	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
  					   this is D0-D3, D0 being fully functional,
  					   and D3 being off. */
 -	int		pm_cap;		/* PM capability offset in the
 -					   configuration space */
 +	u8		pm_cap;		/* PM capability offset */
  	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
  					   can be generated */
  	unsigned int	pme_interrupt:1;
@@@ -364,7 -348,7 +364,7 @@@ static inline struct pci_dev *pci_physf
  	return dev;
  }
  
 -extern struct pci_dev *alloc_pci_dev(void);
 +struct pci_dev *alloc_pci_dev(void);
  
  #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
  #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
@@@ -520,10 -504,10 +520,10 @@@ struct pci_ops 
   * ACPI needs to be able to access PCI config space before we've done a
   * PCI bus scan and created pci_bus structures.
   */
 -extern int raw_pci_read(unsigned int domain, unsigned int bus,
 -			unsigned int devfn, int reg, int len, u32 *val);
 -extern int raw_pci_write(unsigned int domain, unsigned int bus,
 -			unsigned int devfn, int reg, int len, u32 val);
 +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
 +		 int reg, int len, u32 *val);
 +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
 +		  int reg, int len, u32 val);
  
  struct pci_bus_region {
  	resource_size_t start;
@@@ -674,7 -658,7 +674,7 @@@ struct pci_driver 
  /* these external functions are only available when PCI support is enabled */
  #ifdef CONFIG_PCI
  
 -extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
 +void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
  
  enum pcie_bus_config_types {
  	PCIE_BUS_TUNE_OFF,
@@@ -691,11 -675,9 +691,11 @@@ extern struct bus_type pci_bus_type
   * code, or pci core code. */
  extern struct list_head pci_root_buses;	/* list of all known PCI buses */
  /* Some device drivers need know if pci is initiated */
 -extern int no_pci_devices(void);
 +int no_pci_devices(void);
  
  void pcibios_resource_survey_bus(struct pci_bus *bus);
 +void pcibios_add_bus(struct pci_bus *bus);
 +void pcibios_remove_bus(struct pci_bus *bus);
  void pcibios_fixup_bus(struct pci_bus *);
  int __must_check pcibios_enable_device(struct pci_dev *, int mask);
  /* Architecture specific versions may override this (weak) */
@@@ -717,7 -699,7 +717,7 @@@ void pcibios_resource_to_bus(struct pci
  void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
  			     struct pci_bus_region *region);
  void pcibios_scan_specific_bus(int busn);
 -extern struct pci_bus *pci_find_bus(int domain, int busnr);
 +struct pci_bus *pci_find_bus(int domain, int busnr);
  void pci_bus_add_devices(const struct pci_bus *bus);
  struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
  				      struct pci_ops *ops, void *sysdata);
@@@ -750,14 -732,14 +750,14 @@@ struct resource *pci_find_parent_resour
  u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
  int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
  u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 -extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
 -extern void pci_dev_put(struct pci_dev *dev);
 -extern void pci_remove_bus(struct pci_bus *b);
 -extern void pci_stop_and_remove_bus_device(struct pci_dev *dev);
 +struct pci_dev *pci_dev_get(struct pci_dev *dev);
 +void pci_dev_put(struct pci_dev *dev);
 +void pci_remove_bus(struct pci_bus *b);
 +void pci_stop_and_remove_bus_device(struct pci_dev *dev);
  void pci_stop_root_bus(struct pci_bus *bus);
  void pci_remove_root_bus(struct pci_bus *bus);
  void pci_setup_cardbus(struct pci_bus *bus);
 -extern void pci_sort_breadthfirst(void);
 +void pci_sort_breadthfirst(void);
  #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
  #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
  #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
@@@ -1160,17 -1142,18 +1160,17 @@@ static inline int pci_msi_enabled(void
  	return 0;
  }
  #else
 -extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
 -extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
 -extern void pci_msi_shutdown(struct pci_dev *dev);
 -extern void pci_disable_msi(struct pci_dev *dev);
 -extern int pci_msix_table_size(struct pci_dev *dev);
 -extern int pci_enable_msix(struct pci_dev *dev,
 -	struct msix_entry *entries, int nvec);
 -extern void pci_msix_shutdown(struct pci_dev *dev);
 -extern void pci_disable_msix(struct pci_dev *dev);
 -extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 -extern void pci_restore_msi_state(struct pci_dev *dev);
 -extern int pci_msi_enabled(void);
 +int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
 +int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
 +void pci_msi_shutdown(struct pci_dev *dev);
 +void pci_disable_msi(struct pci_dev *dev);
 +int pci_msix_table_size(struct pci_dev *dev);
 +int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 +void pci_msix_shutdown(struct pci_dev *dev);
 +void pci_disable_msix(struct pci_dev *dev);
 +void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 +void pci_restore_msi_state(struct pci_dev *dev);
 +int pci_msi_enabled(void);
  #endif
  
  #ifdef CONFIG_PCIEPORTBUS
@@@ -1185,8 -1168,8 +1185,8 @@@ extern bool pcie_ports_auto
  static inline int pcie_aspm_enabled(void) { return 0; }
  static inline bool pcie_aspm_support_enabled(void) { return false; }
  #else
 -extern int pcie_aspm_enabled(void);
 -extern bool pcie_aspm_support_enabled(void);
 +int pcie_aspm_enabled(void);
 +bool pcie_aspm_support_enabled(void);
  #endif
  
  #ifdef CONFIG_PCIEAER
@@@ -1204,8 -1187,8 +1204,8 @@@ static inline void pcie_set_ecrc_checki
  }
  static inline void pcie_ecrc_get_policy(char *str) {};
  #else
 -extern void pcie_set_ecrc_checking(struct pci_dev *dev);
 -extern void pcie_ecrc_get_policy(char *str);
 +void pcie_set_ecrc_checking(struct pci_dev *dev);
 +void pcie_ecrc_get_policy(char *str);
  #endif
  
  #define pci_enable_msi(pdev)	pci_enable_msi_block(pdev, 1)
@@@ -1216,9 -1199,9 +1216,9 @@@ int  ht_create_irq(struct pci_dev *dev
  void ht_destroy_irq(unsigned int irq);
  #endif /* CONFIG_HT_IRQ */
  
 -extern void pci_cfg_access_lock(struct pci_dev *dev);
 -extern bool pci_cfg_access_trylock(struct pci_dev *dev);
 -extern void pci_cfg_access_unlock(struct pci_dev *dev);
 +void pci_cfg_access_lock(struct pci_dev *dev);
 +bool pci_cfg_access_trylock(struct pci_dev *dev);
 +void pci_cfg_access_unlock(struct pci_dev *dev);
  
  /*
   * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
@@@ -1243,7 -1226,7 +1243,7 @@@ static inline int pci_proc_domain(struc
  /* some architectures require additional setup to direct VGA traffic */
  typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
  		      unsigned int command_bits, u32 flags);
 -extern void pci_register_set_vga_state(arch_set_vga_state_t func);
 +void pci_register_set_vga_state(arch_set_vga_state_t func);
  
  #else /* CONFIG_PCI is not enabled */
  
@@@ -1645,8 -1628,8 +1645,8 @@@ int pcibios_set_pcie_reset_state(struc
  int pcibios_add_device(struct pci_dev *dev);
  
  #ifdef CONFIG_PCI_MMCONFIG
 -extern void __init pci_mmcfg_early_init(void);
 -extern void __init pci_mmcfg_late_init(void);
 +void __init pci_mmcfg_early_init(void);
 +void __init pci_mmcfg_late_init(void);
  #else
  static inline void pci_mmcfg_early_init(void) { }
  static inline void pci_mmcfg_late_init(void) { }
@@@ -1657,12 -1640,13 +1657,13 @@@ int pci_ext_cfg_avail(void)
  void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
  
  #ifdef CONFIG_PCI_IOV
 -extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 -extern void pci_disable_sriov(struct pci_dev *dev);
 -extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
 -extern int pci_num_vf(struct pci_dev *dev);
 +int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 +void pci_disable_sriov(struct pci_dev *dev);
 +irqreturn_t pci_sriov_migration(struct pci_dev *dev);
 +int pci_num_vf(struct pci_dev *dev);
+ int pci_vfs_assigned(struct pci_dev *dev);
 -extern int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 -extern int pci_sriov_get_totalvfs(struct pci_dev *dev);
 +int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 +int pci_sriov_get_totalvfs(struct pci_dev *dev);
  #else
  static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
  {
@@@ -1679,6 -1663,10 +1680,10 @@@ static inline int pci_num_vf(struct pci
  {
  	return 0;
  }
+ static inline int pci_vfs_assigned(struct pci_dev *dev)
+ {
+ 	return 0;
+ }
  static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
  {
  	return 0;
@@@ -1690,8 -1678,8 +1695,8 @@@ static inline int pci_sriov_get_totalvf
  #endif
  
  #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
 -extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
 -extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
 +void pci_hp_create_module_link(struct pci_slot *pci_slot);
 +void pci_hp_remove_module_link(struct pci_slot *pci_slot);
  #endif
  
  /**
@@@ -1835,13 -1823,13 +1840,13 @@@ int pci_vpd_find_info_keyword(const u8 
  /* PCI <-> OF binding helpers */
  #ifdef CONFIG_OF
  struct device_node;
 -extern void pci_set_of_node(struct pci_dev *dev);
 -extern void pci_release_of_node(struct pci_dev *dev);
 -extern void pci_set_bus_of_node(struct pci_bus *bus);
 -extern void pci_release_bus_of_node(struct pci_bus *bus);
 +void pci_set_of_node(struct pci_dev *dev);
 +void pci_release_of_node(struct pci_dev *dev);
 +void pci_set_bus_of_node(struct pci_bus *bus);
 +void pci_release_bus_of_node(struct pci_bus *bus);
  
  /* Arch may override this (weak) */
 -extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
 +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
  
  static inline struct device_node *
  pci_device_to_OF_node(const struct pci_dev *pdev)
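
The new kernel-only PCI_DEVID()/PCI_BUS_NUM() macros in pci.h pack a bus number and devfn into a 16-bit id and recover the bus number from it. A small sketch (illustrative; example_pci_devid() is a made-up helper):

	static u16 example_pci_devid(struct pci_dev *pdev)
	{
		u16 devid = PCI_DEVID(pdev->bus->number, pdev->devfn);

		/* PCI_BUS_NUM() yields the bus; PCI_SLOT()/PCI_FUNC() split devfn */
		WARN_ON(PCI_BUS_NUM(devid) != pdev->bus->number);
		return devid;
	}
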
diff --combined include/net/bluetooth/bluetooth.h
index ea81f0e,6912ef9..10eb9b3
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@@ -193,11 -193,11 +193,11 @@@ static inline bool bdaddr_type_is_le(__
  #define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
  
  /* Copy, swap, convert BD Address */
- static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
+ static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
  {
  	return memcmp(ba1, ba2, sizeof(bdaddr_t));
  }
- static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
+ static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
  {
  	memcpy(dst, src, sizeof(bdaddr_t));
  }
@@@ -226,12 -226,13 +226,12 @@@ struct bt_sock_list 
  	struct hlist_head head;
  	rwlock_t          lock;
  #ifdef CONFIG_PROC_FS
 -        struct file_operations   fops;
          int (* custom_seq_show)(struct seq_file *, void *);
  #endif
  };
  
  int  bt_sock_register(int proto, const struct net_proto_family *ops);
- int  bt_sock_unregister(int proto);
+ void bt_sock_unregister(int proto);
  void bt_sock_link(struct bt_sock_list *l, struct sock *s);
  void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
  int  bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
@@@ -259,12 -260,23 +259,23 @@@ struct l2cap_ctrl 
  	__u8		retries;
  };
  
+ struct hci_dev;
+ 
+ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+ 
+ struct hci_req_ctrl {
+ 	bool			start;
+ 	u8			event;
+ 	hci_req_complete_t	complete;
+ };
+ 
  struct bt_skb_cb {
  	__u8 pkt_type;
  	__u8 incoming;
  	__u16 expect;
  	__u8 force_active;
  	struct l2cap_ctrl control;
+ 	struct hci_req_ctrl req;
  };
  #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
  
@@@ -318,7 -330,7 +329,7 @@@ extern void hci_sock_cleanup(void)
  extern int bt_sysfs_init(void);
  extern void bt_sysfs_cleanup(void);
  
 -extern int  bt_procfs_init(struct module* module, struct net *net, const char *name,
 +extern int  bt_procfs_init(struct net *net, const char *name,
  			   struct bt_sock_list* sk_list,
  			   int (* seq_show)(struct seq_file *, void *));
  extern void bt_procfs_cleanup(struct net *net, const char *name);
diff --combined net/batman-adv/routing.c
index 7de0336,2f1f889..b27a4d7
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@@ -29,6 -29,7 +29,7 @@@
  #include "unicast.h"
  #include "bridge_loop_avoidance.h"
  #include "distributed-arp-table.h"
+ #include "network-coding.h"
  
  static int batadv_route_unicast_packet(struct sk_buff *skb,
  				       struct batadv_hard_iface *recv_if);
@@@ -548,6 -549,17 +549,17 @@@ batadv_find_ifalter_router(struct batad
  	return router;
  }
  
+ /**
+  * batadv_check_unicast_packet - Check for malformed unicast packets
+  * @bat_priv: the bat priv with all the soft interface information
+  * @skb: packet to check
+  * @hdr_size: size of header to pull
+  *
+  * Check for a short header and bad addresses in the given packet. Returns a
+  * negative value when the check fails and 0 otherwise. The negative value
+  * depends on the reason: -ENODATA for a bad header, -EBADR for a broadcast
+  * destination or source, and -EREMOTE for a non-local (other host)
+  * destination.
+  */
  static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
  				       struct sk_buff *skb, int hdr_size)
  {
@@@ -555,21 -567,21 +567,21 @@@
  
  	/* drop packet if it has not necessary minimum size */
  	if (unlikely(!pskb_may_pull(skb, hdr_size)))
- 		return -1;
+ 		return -ENODATA;
  
  	ethhdr = (struct ethhdr *)skb_mac_header(skb);
  
  	/* packet with unicast indication but broadcast recipient */
  	if (is_broadcast_ether_addr(ethhdr->h_dest))
- 		return -1;
+ 		return -EBADR;
  
  	/* packet with broadcast sender address */
  	if (is_broadcast_ether_addr(ethhdr->h_source))
- 		return -1;
+ 		return -EBADR;
  
  	/* not for me */
  	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
- 		return -1;
+ 		return -EREMOTE;
  
  	return 0;
  }
@@@ -852,14 -864,17 +864,17 @@@ static int batadv_route_unicast_packet(
  	/* decrement ttl */
  	unicast_packet->header.ttl--;
  
- 	/* Update stats counter */
- 	batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
- 	batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
- 			   skb->len + ETH_HLEN);
- 
- 	/* route it */
- 	if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
+ 	/* network code packet if possible */
+ 	if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) {
  		ret = NET_RX_SUCCESS;
+ 	} else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
+ 		ret = NET_RX_SUCCESS;
+ 
+ 		/* Update stats counter */
+ 		batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ 		batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+ 				   skb->len + ETH_HLEN);
+ 	}
  
  out:
  	if (neigh_node)
@@@ -924,7 -939,7 +939,7 @@@ out
  }
  
  static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 -				     struct sk_buff *skb) {
 +				     struct sk_buff *skb, int hdr_len) {
  	uint8_t curr_ttvn, old_ttvn;
  	struct batadv_orig_node *orig_node;
  	struct ethhdr *ethhdr;
@@@ -933,7 -948,7 +948,7 @@@
  	int is_old_ttvn;
  
  	/* check if there is enough data before accessing it */
 -	if (pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN) < 0)
 +	if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
  		return 0;
  
  	/* create a copy of the skb (in case of for re-routing) to modify it. */
@@@ -941,7 -956,7 +956,7 @@@
  		return 0;
  
  	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 -	ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
 +	ethhdr = (struct ethhdr *)(skb->data + hdr_len);
  
  	/* check if the destination client was served by this node and it is now
  	 * roaming. In this case, it means that the node has got a ROAM_ADV
@@@ -1035,7 -1050,7 +1050,7 @@@ int batadv_recv_unicast_packet(struct s
  	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
  	uint8_t *orig_addr;
  	struct batadv_orig_node *orig_node = NULL;
- 	int hdr_size = sizeof(*unicast_packet);
+ 	int check, hdr_size = sizeof(*unicast_packet);
  	bool is4addr;
  
  	unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@@ -1046,9 -1061,19 +1061,18 @@@
  	if (is4addr)
  		hdr_size = sizeof(*unicast_4addr_packet);
  
- 	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
+ 	/* function returns -EREMOTE for promiscuous packets */
+ 	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+ 
+ 	/* Even though the packet is not for us, we might save it to use for
+ 	 * decoding a later received coded packet
+ 	 */
+ 	if (check == -EREMOTE)
+ 		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
+ 
+ 	if (check < 0)
  		return NET_RX_DROP;
 -
 -	if (!batadv_check_unicast_ttvn(bat_priv, skb))
 +	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
  		return NET_RX_DROP;
  
  	/* packet for me */
@@@ -1092,7 -1117,7 +1116,7 @@@ int batadv_recv_ucast_frag_packet(struc
  	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
  		return NET_RX_DROP;
  
 -	if (!batadv_check_unicast_ttvn(bat_priv, skb))
 +	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
  		return NET_RX_DROP;
  
  	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
diff --combined net/bluetooth/af_bluetooth.c
index d1b3d15,e5338f7..9096137
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@@ -92,23 -92,14 +92,14 @@@ int bt_sock_register(int proto, const s
  }
  EXPORT_SYMBOL(bt_sock_register);
  
- int bt_sock_unregister(int proto)
+ void bt_sock_unregister(int proto)
  {
- 	int err = 0;
- 
  	if (proto < 0 || proto >= BT_MAX_PROTO)
- 		return -EINVAL;
+ 		return;
  
  	write_lock(&bt_proto_lock);
- 
- 	if (!bt_proto[proto])
- 		err = -ENOENT;
- 	else
- 		bt_proto[proto] = NULL;
- 
+ 	bt_proto[proto] = NULL;
  	write_unlock(&bt_proto_lock);
- 
- 	return err;
  }
  EXPORT_SYMBOL(bt_sock_unregister);
  
@@@ -422,7 -413,8 +413,8 @@@ unsigned int bt_sock_poll(struct file *
  		return bt_accept_poll(sk);
  
  	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- 		mask |= POLLERR;
+ 		mask |= POLLERR |
+ 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
  
  	if (sk->sk_shutdown & RCV_SHUTDOWN)
  		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
@@@ -617,7 -609,7 +609,7 @@@ static int bt_seq_open(struct inode *in
  	struct bt_sock_list *sk_list;
  	struct bt_seq_state *s;
  
 -	sk_list = PDE(inode)->data;
 +	sk_list = PDE_DATA(inode);
  	s = __seq_open_private(file, &bt_seq_ops,
  			       sizeof(struct bt_seq_state));
  	if (!s)
@@@ -627,21 -619,26 +619,21 @@@
  	return 0;
  }
  
 -int bt_procfs_init(struct module* module, struct net *net, const char *name,
 +static const struct file_operations bt_fops = {
 +	.open = bt_seq_open,
 +	.read = seq_read,
 +	.llseek = seq_lseek,
 +	.release = seq_release_private
 +};
 +
 +int bt_procfs_init(struct net *net, const char *name,
  		   struct bt_sock_list* sk_list,
  		   int (* seq_show)(struct seq_file *, void *))
  {
 -	struct proc_dir_entry * pde;
 -
  	sk_list->custom_seq_show = seq_show;
  
 -	sk_list->fops.owner     = module;
 -	sk_list->fops.open      = bt_seq_open;
 -	sk_list->fops.read      = seq_read;
 -	sk_list->fops.llseek    = seq_lseek;
 -	sk_list->fops.release   = seq_release_private;
 -
 -	pde = proc_create(name, 0, net->proc_net, &sk_list->fops);
 -	if (!pde)
 +	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
  		return -ENOMEM;
 -
 -	pde->data = sk_list;
 -
  	return 0;
  }
  
@@@ -650,7 -647,7 +642,7 @@@ void bt_procfs_cleanup(struct net *net
  	remove_proc_entry(name, net->proc_net);
  }
  #else
 -int bt_procfs_init(struct module* module, struct net *net, const char *name,
 +int bt_procfs_init(struct net *net, const char *name,
  		   struct bt_sock_list* sk_list,
  		   int (* seq_show)(struct seq_file *, void *))
  {
diff --combined net/bluetooth/bnep/sock.c
index d4686fb,5b1c04e..5f05129
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@@ -234,7 -234,7 +234,7 @@@ int __init bnep_sock_init(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create BNEP proc file");
  		bt_sock_unregister(BTPROTO_BNEP);
@@@ -253,8 -253,6 +253,6 @@@ error
  void __exit bnep_sock_cleanup(void)
  {
  	bt_procfs_cleanup(&init_net, "bnep");
- 	if (bt_sock_unregister(BTPROTO_BNEP) < 0)
- 		BT_ERR("Can't unregister BNEP socket");
- 
+ 	bt_sock_unregister(BTPROTO_BNEP);
  	proto_unregister(&bnep_proto);
  }
diff --combined net/bluetooth/cmtp/sock.c
index 03f26bf,58d9ede..d82787d
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@@ -245,7 -245,7 +245,7 @@@ int cmtp_init_sockets(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create CMTP proc file");
  		bt_sock_unregister(BTPROTO_HIDP);
@@@ -264,8 -264,6 +264,6 @@@ error
  void cmtp_cleanup_sockets(void)
  {
  	bt_procfs_cleanup(&init_net, "cmtp");
- 	if (bt_sock_unregister(BTPROTO_CMTP) < 0)
- 		BT_ERR("Can't unregister CMTP socket");
- 
+ 	bt_sock_unregister(BTPROTO_CMTP);
  	proto_unregister(&cmtp_proto);
  }
diff --combined net/bluetooth/hci_sock.c
index 6ad2395,aa4354f..9bd7d95
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@@ -854,6 -854,11 +854,11 @@@ static int hci_sock_sendmsg(struct kioc
  			skb_queue_tail(&hdev->raw_q, skb);
  			queue_work(hdev->workqueue, &hdev->tx_work);
  		} else {
+ 			/* Stand-alone HCI commands must be flagged as
+ 			 * single-command requests.
+ 			 */
+ 			bt_cb(skb)->req.start = true;
+ 
  			skb_queue_tail(&hdev->cmd_q, skb);
  			queue_work(hdev->workqueue, &hdev->cmd_work);
  		}
@@@ -1102,7 -1107,7 +1107,7 @@@ int __init hci_sock_init(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create HCI proc file");
  		bt_sock_unregister(BTPROTO_HCI);
@@@ -1121,8 -1126,6 +1126,6 @@@ error
  void hci_sock_cleanup(void)
  {
  	bt_procfs_cleanup(&init_net, "hci");
- 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
- 		BT_ERR("HCI socket unregistration failed");
- 
+ 	bt_sock_unregister(BTPROTO_HCI);
  	proto_unregister(&hci_sk_proto);
  }
diff --combined net/bluetooth/hidp/sock.c
index e7e04d4,2f4cbb0..cb3fdde
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@@ -77,21 -77,12 +77,12 @@@ static int hidp_sock_ioctl(struct socke
  			return err;
  		}
  
- 		if (csock->sk->sk_state != BT_CONNECTED ||
- 				isock->sk->sk_state != BT_CONNECTED) {
- 			sockfd_put(csock);
- 			sockfd_put(isock);
- 			return -EBADFD;
- 		}
+ 		err = hidp_connection_add(&ca, csock, isock);
+ 		if (!err && copy_to_user(argp, &ca, sizeof(ca)))
+ 			err = -EFAULT;
  
- 		err = hidp_add_connection(&ca, csock, isock);
- 		if (!err) {
- 			if (copy_to_user(argp, &ca, sizeof(ca)))
- 				err = -EFAULT;
- 		} else {
- 			sockfd_put(csock);
- 			sockfd_put(isock);
- 		}
+ 		sockfd_put(csock);
+ 		sockfd_put(isock);
  
  		return err;
  
@@@ -102,7 -93,7 +93,7 @@@
  		if (copy_from_user(&cd, argp, sizeof(cd)))
  			return -EFAULT;
  
- 		return hidp_del_connection(&cd);
+ 		return hidp_connection_del(&cd);
  
  	case HIDPGETCONNLIST:
  		if (copy_from_user(&cl, argp, sizeof(cl)))
@@@ -284,7 -275,7 +275,7 @@@ int __init hidp_init_sockets(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create HIDP proc file");
  		bt_sock_unregister(BTPROTO_HIDP);
@@@ -296,7 -287,6 +287,6 @@@
  	return 0;
  
  error:
- 	BT_ERR("Can't register HIDP socket");
  	proto_unregister(&hidp_proto);
  	return err;
  }
@@@ -304,8 -294,6 +294,6 @@@
  void __exit hidp_cleanup_sockets(void)
  {
  	bt_procfs_cleanup(&init_net, "hidp");
- 	if (bt_sock_unregister(BTPROTO_HIDP) < 0)
- 		BT_ERR("Can't unregister HIDP socket");
- 
+ 	bt_sock_unregister(BTPROTO_HIDP);
  	proto_unregister(&hidp_proto);
  }
diff --combined net/bluetooth/l2cap_sock.c
index fe15960,141e7b0..36fed40
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@@ -43,6 -43,12 +43,12 @@@ static void l2cap_sock_init(struct soc
  static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
  				     int proto, gfp_t prio);
  
+ bool l2cap_is_socket(struct socket *sock)
+ {
+ 	return sock && sock->ops == &l2cap_sock_ops;
+ }
+ EXPORT_SYMBOL(l2cap_is_socket);
+ 
  static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
  {
  	struct sock *sk = sock->sk;
@@@ -1292,7 -1298,7 +1298,7 @@@ int __init l2cap_init_sockets(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list,
 +	err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list,
  			     NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create L2CAP proc file");
@@@ -1312,8 -1318,6 +1318,6 @@@ error
  void l2cap_cleanup_sockets(void)
  {
  	bt_procfs_cleanup(&init_net, "l2cap");
- 	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
- 		BT_ERR("L2CAP socket unregistration failed");
- 
+ 	bt_sock_unregister(BTPROTO_L2CAP);
  	proto_unregister(&l2cap_proto);
  }
diff --combined net/bluetooth/rfcomm/sock.c
index 4b6eeaf,a8638b5..30b3721
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@@ -1037,7 -1037,7 +1037,7 @@@ int __init rfcomm_init_sockets(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create RFCOMM proc file");
  		bt_sock_unregister(BTPROTO_RFCOMM);
@@@ -1066,8 -1066,7 +1066,7 @@@ void __exit rfcomm_cleanup_sockets(void
  
  	debugfs_remove(rfcomm_sock_debugfs);
  
- 	if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
- 		BT_ERR("RFCOMM socket layer unregistration failed");
+ 	bt_sock_unregister(BTPROTO_RFCOMM);
  
  	proto_unregister(&rfcomm_proto);
  }
diff --combined net/bluetooth/sco.c
index 37bc712,373d81e..e7bd4ee
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@@ -83,7 -83,7 +83,7 @@@ static struct sco_conn *sco_conn_add(st
  	if (conn)
  		return conn;
  
- 	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+ 	conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
  	if (!conn)
  		return NULL;
  
@@@ -185,7 -185,7 +185,7 @@@ static int sco_connect(struct sock *sk
  
  	conn = sco_conn_add(hcon);
  	if (!conn) {
- 		hci_conn_put(hcon);
+ 		hci_conn_drop(hcon);
  		err = -ENOMEM;
  		goto done;
  	}
@@@ -353,7 -353,7 +353,7 @@@ static void __sco_sock_close(struct soc
  		if (sco_pi(sk)->conn->hcon) {
  			sk->sk_state = BT_DISCONN;
  			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
- 			hci_conn_put(sco_pi(sk)->conn->hcon);
+ 			hci_conn_drop(sco_pi(sk)->conn->hcon);
  			sco_pi(sk)->conn->hcon = NULL;
  		} else
  			sco_chan_del(sk, ECONNRESET);
@@@ -481,8 -481,7 +481,7 @@@ static int sco_sock_connect(struct sock
  {
  	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
  	struct sock *sk = sock->sk;
- 	int err = 0;
- 
+ 	int err;
  
  	BT_DBG("sk %p", sk);
  
@@@ -653,6 -652,42 +652,42 @@@ static int sco_sock_sendmsg(struct kioc
  	return err;
  }
  
+ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+ 	BT_DBG("conn %p", conn);
+ 
+ 	conn->state = BT_CONFIG;
+ 
+ 	if (!lmp_esco_capable(hdev)) {
+ 		struct hci_cp_accept_conn_req cp;
+ 
+ 		bacpy(&cp.bdaddr, &conn->dst);
+ 
+ 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+ 			cp.role = 0x00; /* Become master */
+ 		else
+ 			cp.role = 0x01; /* Remain slave */
+ 
+ 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
+ 	} else {
+ 		struct hci_cp_accept_sync_conn_req cp;
+ 
+ 		bacpy(&cp.bdaddr, &conn->dst);
+ 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ 
+ 		cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+ 		cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+ 		cp.max_latency    = __constant_cpu_to_le16(0xffff);
+ 		cp.content_format = cpu_to_le16(hdev->voice_setting);
+ 		cp.retrans_effort = 0xff;
+ 
+ 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
+ 			     sizeof(cp), &cp);
+ 	}
+ }
+ 
  static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
  			    struct msghdr *msg, size_t len, int flags)
  {
@@@ -663,7 -698,7 +698,7 @@@
  
  	if (sk->sk_state == BT_CONNECT2 &&
  	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
- 		hci_conn_accept(pi->conn->hcon, 0);
+ 		sco_conn_defer_accept(pi->conn->hcon, 0);
  		sk->sk_state = BT_CONFIG;
  		msg->msg_namelen = 0;
  
@@@ -883,7 -918,7 +918,7 @@@ static void sco_chan_del(struct sock *s
  		sco_conn_unlock(conn);
  
  		if (conn->hcon)
- 			hci_conn_put(conn->hcon);
+ 			hci_conn_drop(conn->hcon);
  	}
  
  	sk->sk_state = BT_CLOSED;
@@@ -1084,7 -1119,7 +1119,7 @@@ int __init sco_init(void
  		goto error;
  	}
  
 -	err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
 +	err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
  	if (err < 0) {
  		BT_ERR("Failed to create SCO proc file");
  		bt_sock_unregister(BTPROTO_SCO);
@@@ -1113,8 -1148,7 +1148,7 @@@ void __exit sco_exit(void
  
  	debugfs_remove(sco_debugfs);
  
- 	if (bt_sock_unregister(BTPROTO_SCO) < 0)
- 		BT_ERR("SCO socket unregistration failed");
+ 	bt_sock_unregister(BTPROTO_SCO);
  
  	proto_unregister(&sco_proto);
  }
diff --combined net/core/neighbour.c
index 537301a,89a3a07..5c56b21
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@@ -39,21 -39,13 +39,13 @@@
  #include <linux/string.h>
  #include <linux/log2.h>
  
+ #define DEBUG
  #define NEIGH_DEBUG 1
- 
- #define NEIGH_PRINTK(x...) printk(x)
- #define NEIGH_NOPRINTK(x...) do { ; } while(0)
- #define NEIGH_PRINTK1 NEIGH_NOPRINTK
- #define NEIGH_PRINTK2 NEIGH_NOPRINTK
- 
- #if NEIGH_DEBUG >= 1
- #undef NEIGH_PRINTK1
- #define NEIGH_PRINTK1 NEIGH_PRINTK
- #endif
- #if NEIGH_DEBUG >= 2
- #undef NEIGH_PRINTK2
- #define NEIGH_PRINTK2 NEIGH_PRINTK
- #endif
+ #define neigh_dbg(level, fmt, ...)		\
+ do {						\
+ 	if (level <= NEIGH_DEBUG)		\
+ 		pr_debug(fmt, ##__VA_ARGS__);	\
+ } while (0)
  
  #define PNEIGH_HASHMASK		0xF
  
@@@ -246,7 -238,7 +238,7 @@@ static void neigh_flush_dev(struct neig
  					n->nud_state = NUD_NOARP;
  				else
  					n->nud_state = NUD_NONE;
- 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
+ 				neigh_dbg(2, "neigh %p is stray\n", n);
  			}
  			write_unlock(&n->lock);
  			neigh_cleanup_and_release(n);
@@@ -542,7 -534,7 +534,7 @@@ struct neighbour *__neigh_create(struc
  						     lockdep_is_held(&tbl->lock)));
  	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
  	write_unlock_bh(&tbl->lock);
- 	NEIGH_PRINTK2("neigh %p is created.\n", n);
+ 	neigh_dbg(2, "neigh %p is created\n", n);
  	rc = n;
  out:
  	return rc;
@@@ -725,7 -717,7 +717,7 @@@ void neigh_destroy(struct neighbour *ne
  	dev_put(dev);
  	neigh_parms_put(neigh->parms);
  
- 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
+ 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
  
  	atomic_dec(&neigh->tbl->entries);
  	kfree_rcu(neigh, rcu);
@@@ -739,7 -731,7 +731,7 @@@ EXPORT_SYMBOL(neigh_destroy)
   */
  static void neigh_suspect(struct neighbour *neigh)
  {
- 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+ 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
  
  	neigh->output = neigh->ops->output;
  }
@@@ -751,7 -743,7 +743,7 @@@
   */
  static void neigh_connect(struct neighbour *neigh)
  {
- 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
+ 	neigh_dbg(2, "neigh %p is connected\n", neigh);
  
  	neigh->output = neigh->ops->connected_output;
  }
@@@ -852,7 -844,7 +844,7 @@@ static void neigh_invalidate(struct nei
  	struct sk_buff *skb;
  
  	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
- 	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
+ 	neigh_dbg(2, "neigh %p is failed\n", neigh);
  	neigh->updated = jiffies;
  
  	/* It is very thin place. report_unreachable is very complicated
@@@ -904,17 -896,17 +896,17 @@@ static void neigh_timer_handler(unsigne
  	if (state & NUD_REACHABLE) {
  		if (time_before_eq(now,
  				   neigh->confirmed + neigh->parms->reachable_time)) {
- 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
+ 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
  			next = neigh->confirmed + neigh->parms->reachable_time;
  		} else if (time_before_eq(now,
  					  neigh->used + neigh->parms->delay_probe_time)) {
- 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
  			neigh->nud_state = NUD_DELAY;
  			neigh->updated = jiffies;
  			neigh_suspect(neigh);
  			next = now + neigh->parms->delay_probe_time;
  		} else {
- 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+ 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
  			neigh->nud_state = NUD_STALE;
  			neigh->updated = jiffies;
  			neigh_suspect(neigh);
@@@ -923,14 -915,14 +915,14 @@@
  	} else if (state & NUD_DELAY) {
  		if (time_before_eq(now,
  				   neigh->confirmed + neigh->parms->delay_probe_time)) {
- 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
+ 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
  			neigh->nud_state = NUD_REACHABLE;
  			neigh->updated = jiffies;
  			neigh_connect(neigh);
  			notify = 1;
  			next = neigh->confirmed + neigh->parms->reachable_time;
  		} else {
- 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
+ 			neigh_dbg(2, "neigh %p is probed\n", neigh);
  			neigh->nud_state = NUD_PROBE;
  			neigh->updated = jiffies;
  			atomic_set(&neigh->probes, 0);
@@@ -997,7 -989,7 +989,7 @@@ int __neigh_event_send(struct neighbou
  			return 1;
  		}
  	} else if (neigh->nud_state & NUD_STALE) {
- 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
  		neigh->nud_state = NUD_DELAY;
  		neigh->updated = jiffies;
  		neigh_add_timer(neigh,
@@@ -1320,8 -1312,7 +1312,7 @@@ int neigh_resolve_output(struct neighbo
  out:
  	return rc;
  discard:
- 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
- 		      dst, neigh);
+ 	neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
  out_kfree_skb:
  	rc = -EINVAL;
  	kfree_skb(skb);
@@@ -1498,7 -1489,7 +1489,7 @@@ void neigh_parms_release(struct neigh_t
  		}
  	}
  	write_unlock_bh(&tbl->lock);
- 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
+ 	neigh_dbg(1, "%s: not found\n", __func__);
  }
  EXPORT_SYMBOL(neigh_parms_release);
  
@@@ -1613,7 -1604,7 +1604,7 @@@ int neigh_table_clear(struct neigh_tabl
  }
  EXPORT_SYMBOL(neigh_table_clear);
  
- static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
  	struct net *net = sock_net(skb->sk);
  	struct ndmsg *ndm;
@@@ -1677,7 -1668,7 +1668,7 @@@ out
  	return err;
  }
  
- static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
  	struct net *net = sock_net(skb->sk);
  	struct ndmsg *ndm;
@@@ -1955,7 -1946,7 +1946,7 @@@ static const struct nla_policy nl_ntbl_
  	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
  };
  
- static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
  	struct net *net = sock_net(skb->sk);
  	struct neigh_table *tbl;
@@@ -2714,7 -2705,7 +2705,7 @@@ static int neigh_stat_seq_open(struct i
  
  	if (!ret) {
  		struct seq_file *sf = file->private_data;
 -		sf->private = PDE(inode)->data;
 +		sf->private = PDE_DATA(inode);
  	}
  	return ret;
  };
diff --combined net/ipv4/tcp_ipv4.c
index fc55a1c,8ea9751..7196523
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -838,7 -838,6 +838,6 @@@ static void tcp_v4_reqsk_send_ack(struc
   */
  static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
  			      struct request_sock *req,
- 			      struct request_values *rvp,
  			      u16 queue_mapping,
  			      bool nocache)
  {
@@@ -851,7 -850,7 +850,7 @@@
  	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
  		return -1;
  
- 	skb = tcp_make_synack(sk, dst, req, rvp, NULL);
+ 	skb = tcp_make_synack(sk, dst, req, NULL);
  
  	if (skb) {
  		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@@ -868,10 -867,9 +867,9 @@@
  	return err;
  }
  
- static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
- 			     struct request_values *rvp)
+ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
  {
- 	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
+ 	int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
  
  	if (!res)
  		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@@ -1371,8 -1369,7 +1369,7 @@@ static bool tcp_fastopen_check(struct s
  static int tcp_v4_conn_req_fastopen(struct sock *sk,
  				    struct sk_buff *skb,
  				    struct sk_buff *skb_synack,
- 				    struct request_sock *req,
- 				    struct request_values *rvp)
+ 				    struct request_sock *req)
  {
  	struct tcp_sock *tp = tcp_sk(sk);
  	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
@@@ -1467,9 -1464,7 +1464,7 @@@
  
  int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
  {
- 	struct tcp_extend_values tmp_ext;
  	struct tcp_options_received tmp_opt;
- 	const u8 *hash_location;
  	struct request_sock *req;
  	struct inet_request_sock *ireq;
  	struct tcp_sock *tp = tcp_sk(sk);
@@@ -1519,42 -1514,7 +1514,7 @@@
  	tcp_clear_options(&tmp_opt);
  	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
  	tmp_opt.user_mss  = tp->rx_opt.user_mss;
- 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
- 	    want_cookie ? NULL : &foc);
- 
- 	if (tmp_opt.cookie_plus > 0 &&
- 	    tmp_opt.saw_tstamp &&
- 	    !tp->rx_opt.cookie_out_never &&
- 	    (sysctl_tcp_cookie_size > 0 ||
- 	     (tp->cookie_values != NULL &&
- 	      tp->cookie_values->cookie_desired > 0))) {
- 		u8 *c;
- 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
- 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
- 
- 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
- 			goto drop_and_release;
- 
- 		/* Secret recipe starts with IP addresses */
- 		*mess++ ^= (__force u32)daddr;
- 		*mess++ ^= (__force u32)saddr;
- 
- 		/* plus variable length Initiator Cookie */
- 		c = (u8 *)mess;
- 		while (l-- > 0)
- 			*c++ ^= *hash_location++;
- 
- 		want_cookie = false;	/* not our kind of cookie */
- 		tmp_ext.cookie_out_never = 0; /* false */
- 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
- 	} else if (!tp->rx_opt.cookie_in_always) {
- 		/* redundant indications, but ensure initialization. */
- 		tmp_ext.cookie_out_never = 1; /* true */
- 		tmp_ext.cookie_plus = 0;
- 	} else {
- 		goto drop_and_release;
- 	}
- 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
+ 	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
  
  	if (want_cookie && !tmp_opt.saw_tstamp)
  		tcp_clear_options(&tmp_opt);
@@@ -1636,7 -1596,6 +1596,6 @@@
  	 * of tcp_v4_send_synack()->tcp_select_initial_window().
  	 */
  	skb_synack = tcp_make_synack(sk, dst, req,
- 	    (struct request_values *)&tmp_ext,
  	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
  
  	if (skb_synack) {
@@@ -1660,8 -1619,7 +1619,7 @@@
  		if (fastopen_cookie_present(&foc) && foc.len != 0)
  			NET_INC_STATS_BH(sock_net(sk),
  			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
- 	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
- 	    (struct request_values *)&tmp_ext))
+ 	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
  		goto drop_and_free;
  
  	return 0;
@@@ -1908,6 -1866,7 +1866,7 @@@ discard
  	return 0;
  
  csum_err:
+ 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
  	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
  	goto discard;
  }
@@@ -1950,6 -1909,50 +1909,51 @@@ void tcp_v4_early_demux(struct sk_buff 
  	}
  }
  
+ /* Packet is added to VJ-style prequeue for processing in process
+  * context, if a reader task is waiting. Apparently, this exciting
+  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+  * failed somewhere. Latency? Burstiness? Well, at least now we will
+  * see, why it failed. 8)8)				  --ANK
+  *
+  */
+ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+ 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ 		return false;
+ 
+ 	if (skb->len <= tcp_hdrlen(skb) &&
+ 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+ 		return false;
+ 
++	skb_dst_force(skb);
+ 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+ 	tp->ucopy.memory += skb->truesize;
+ 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+ 		struct sk_buff *skb1;
+ 
+ 		BUG_ON(sock_owned_by_user(sk));
+ 
+ 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+ 			sk_backlog_rcv(sk, skb1);
+ 			NET_INC_STATS_BH(sock_net(sk),
+ 					 LINUX_MIB_TCPPREQUEUEDROPPED);
+ 		}
+ 
+ 		tp->ucopy.memory = 0;
+ 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+ 		wake_up_interruptible_sync_poll(sk_sleep(sk),
+ 					   POLLIN | POLLRDNORM | POLLRDBAND);
+ 		if (!inet_csk_ack_scheduled(sk))
+ 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ 						  (3 * tcp_rto_min(sk)) / 4,
+ 						  TCP_RTO_MAX);
+ 	}
+ 	return true;
+ }
+ EXPORT_SYMBOL(tcp_prequeue);
+ 
  /*
   *	From tcp_input.c
   */
@@@ -1983,7 -1986,7 +1987,7 @@@ int tcp_v4_rcv(struct sk_buff *skb
  	 * provided case of th->doff==0 is eliminated.
  	 * So, we defer the checks. */
  	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
- 		goto bad_packet;
+ 		goto csum_error;
  
  	th = tcp_hdr(skb);
  	iph = ip_hdr(skb);
@@@ -2049,6 -2052,8 +2053,8 @@@ no_tcp_socket
  		goto discard_it;
  
  	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+ csum_error:
+ 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
  bad_packet:
  		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
  	} else {
@@@ -2070,10 -2075,13 +2076,13 @@@ do_time_wait
  		goto discard_it;
  	}
  
- 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
- 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ 	if (skb->len < (th->doff << 2)) {
  		inet_twsk_put(inet_twsk(sk));
- 		goto discard_it;
+ 		goto bad_packet;
+ 	}
+ 	if (tcp_checksum_complete(skb)) {
+ 		inet_twsk_put(inet_twsk(sk));
+ 		goto csum_error;
  	}
  	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
  	case TCP_TW_SYN: {
@@@ -2197,12 -2205,6 +2206,6 @@@ void tcp_v4_destroy_sock(struct sock *s
  	if (inet_csk(sk)->icsk_bind_hash)
  		inet_put_port(sk);
  
- 	/* TCP Cookie Transactions */
- 	if (tp->cookie_values != NULL) {
- 		kref_put(&tp->cookie_values->kref,
- 			 tcp_cookie_values_release);
- 		tp->cookie_values = NULL;
- 	}
  	BUG_ON(tp->fastopen_rsk != NULL);
  
  	/* If socket is aborted during connect operation */
@@@ -2580,7 -2582,7 +2583,7 @@@ static void tcp_seq_stop(struct seq_fil
  
  int tcp_seq_open(struct inode *inode, struct file *file)
  {
 -	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
 +	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
  	struct tcp_iter_state *s;
  	int err;
  
@@@ -2659,7 -2661,9 +2662,9 @@@ static void get_tcp4_sock(struct sock *
  	__u16 srcp = ntohs(inet->inet_sport);
  	int rx_queue;
  
- 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+ 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+ 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
  		timer_active	= 1;
  		timer_expires	= icsk->icsk_timeout;
  	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --combined net/ipv4/udp.c
index d272643,3159d16..6abbe64
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -902,9 -902,9 +902,9 @@@ int udp_sendmsg(struct kiocb *iocb, str
  	ipc.addr = inet->inet_saddr;
  
  	ipc.oif = sk->sk_bound_dev_if;
- 	err = sock_tx_timestamp(sk, &ipc.tx_flags);
- 	if (err)
- 		return err;
+ 
+ 	sock_tx_timestamp(sk, &ipc.tx_flags);
+ 
  	if (msg->msg_controllen) {
  		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
  		if (err)
@@@ -1131,6 -1131,8 +1131,8 @@@ static unsigned int first_packet_length
  	spin_lock_bh(&rcvq->lock);
  	while ((skb = skb_peek(rcvq)) != NULL &&
  		udp_lib_checksum_complete(skb)) {
+ 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
+ 				 IS_UDPLITE(sk));
  		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
  				 IS_UDPLITE(sk));
  		atomic_inc(&sk->sk_drops);
@@@ -1286,8 -1288,10 +1288,10 @@@ out
  
  csum_copy_err:
  	slow = lock_sock_fast(sk);
- 	if (!skb_kill_datagram(sk, skb, flags))
+ 	if (!skb_kill_datagram(sk, skb, flags)) {
+ 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
  		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ 	}
  	unlock_sock_fast(sk, slow);
  
  	if (noblock)
@@@ -1513,7 -1517,7 +1517,7 @@@ int udp_queue_rcv_skb(struct sock *sk, 
  
  	if (rcu_access_pointer(sk->sk_filter) &&
  	    udp_lib_checksum_complete(skb))
- 		goto drop;
+ 		goto csum_error;
  
  
  	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
@@@ -1533,6 -1537,8 +1537,8 @@@
  
  	return rc;
  
+ csum_error:
+ 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
  drop:
  	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  	atomic_inc(&sk->sk_drops);
@@@ -1749,6 -1755,7 +1755,7 @@@ csum_error
  		       proto == IPPROTO_UDPLITE ? "Lite" : "",
  		       &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
  		       ulen);
+ 	UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
  drop:
  	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
  	kfree_skb(skb);
@@@ -2093,7 -2100,7 +2100,7 @@@ static void udp_seq_stop(struct seq_fil
  
  int udp_seq_open(struct inode *inode, struct file *file)
  {
 -	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
 +	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
  	struct udp_iter_state *s;
  	int err;
  
@@@ -2279,31 -2286,88 +2286,88 @@@ void __init udp_init(void
  
  int udp4_ufo_send_check(struct sk_buff *skb)
  {
- 	const struct iphdr *iph;
- 	struct udphdr *uh;
- 
- 	if (!pskb_may_pull(skb, sizeof(*uh)))
+ 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
  		return -EINVAL;
  
- 	iph = ip_hdr(skb);
- 	uh = udp_hdr(skb);
+ 	if (likely(!skb->encapsulation)) {
+ 		const struct iphdr *iph;
+ 		struct udphdr *uh;
+ 
+ 		iph = ip_hdr(skb);
+ 		uh = udp_hdr(skb);
  
- 	uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
- 				       IPPROTO_UDP, 0);
- 	skb->csum_start = skb_transport_header(skb) - skb->head;
- 	skb->csum_offset = offsetof(struct udphdr, check);
- 	skb->ip_summed = CHECKSUM_PARTIAL;
+ 		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
+ 				IPPROTO_UDP, 0);
+ 		skb->csum_start = skb_transport_header(skb) - skb->head;
+ 		skb->csum_offset = offsetof(struct udphdr, check);
+ 		skb->ip_summed = CHECKSUM_PARTIAL;
+ 	}
  	return 0;
  }
  
+ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ 		netdev_features_t features)
+ {
+ 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+ 	int mac_len = skb->mac_len;
+ 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
+ 	int outer_hlen;
+ 	netdev_features_t enc_features;
+ 
+ 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
+ 		goto out;
+ 
+ 	skb->encapsulation = 0;
+ 	__skb_pull(skb, tnl_hlen);
+ 	skb_reset_mac_header(skb);
+ 	skb_set_network_header(skb, skb_inner_network_offset(skb));
+ 	skb->mac_len = skb_inner_network_offset(skb);
+ 
+ 	/* segment inner packet. */
+ 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+ 	segs = skb_mac_gso_segment(skb, enc_features);
+ 	if (!segs || IS_ERR(segs))
+ 		goto out;
+ 
+ 	outer_hlen = skb_tnl_header_len(skb);
+ 	skb = segs;
+ 	do {
+ 		struct udphdr *uh;
+ 		int udp_offset = outer_hlen - tnl_hlen;
+ 
+ 		skb->mac_len = mac_len;
+ 
+ 		skb_push(skb, outer_hlen);
+ 		skb_reset_mac_header(skb);
+ 		skb_set_network_header(skb, mac_len);
+ 		skb_set_transport_header(skb, udp_offset);
+ 		uh = udp_hdr(skb);
+ 		uh->len = htons(skb->len - udp_offset);
+ 
+ 		/* csum segment if tunnel sets skb with csum. */
+ 		if (unlikely(uh->check)) {
+ 			struct iphdr *iph = ip_hdr(skb);
+ 
+ 			uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 						       skb->len - udp_offset,
+ 						       IPPROTO_UDP, 0);
+ 			uh->check = csum_fold(skb_checksum(skb, udp_offset,
+ 							   skb->len - udp_offset, 0));
+ 			if (uh->check == 0)
+ 				uh->check = CSUM_MANGLED_0;
+ 
+ 		}
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 	} while ((skb = skb->next));
+ out:
+ 	return segs;
+ }
+ 
  struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
  	netdev_features_t features)
  {
  	struct sk_buff *segs = ERR_PTR(-EINVAL);
  	unsigned int mss;
- 	int offset;
- 	__wsum csum;
- 
  	mss = skb_shinfo(skb)->gso_size;
  	if (unlikely(skb->len <= mss))
  		goto out;
@@@ -2313,6 -2377,7 +2377,7 @@@
  		int type = skb_shinfo(skb)->gso_type;
  
  		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+ 				      SKB_GSO_UDP_TUNNEL |
  				      SKB_GSO_GRE) ||
  			     !(type & (SKB_GSO_UDP))))
  			goto out;
@@@ -2323,20 -2388,27 +2388,27 @@@
  		goto out;
  	}
  
  	/* Fragment the skb. IP headers of the fragments are updated in
  	 * inet_gso_segment()
  	 */
- 	segs = skb_segment(skb, features);
+ 	if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+ 		segs = skb_udp_tunnel_segment(skb, features);
+ 	else {
+ 		int offset;
+ 		__wsum csum;
+ 
+ 		/* Do software UFO. Complete and fill in the UDP checksum as
+ 		 * HW cannot do checksum of UDP packets sent as multiple
+ 		 * IP fragments.
+ 		 */
+ 		offset = skb_checksum_start_offset(skb);
+ 		csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ 		offset += skb->csum_offset;
+ 		*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 
+ 		segs = skb_segment(skb, features);
+ 	}
  out:
  	return segs;
  }
- 
diff --combined net/ipv6/proc.c
index 537d9ee,115cc58..f3c1ff4
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@@ -90,6 -90,7 +90,7 @@@ static const struct snmp_mib snmp6_ipst
  	SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
  	SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
  	SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+ 	SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
  	SNMP_MIB_SENTINEL
  };
  
@@@ -99,6 -100,7 +100,7 @@@ static const struct snmp_mib snmp6_icmp
  	SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
  	SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
  	SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
+ 	SNMP_MIB_ITEM("Icmp6InCsumErrors", ICMP6_MIB_CSUMERRORS),
  	SNMP_MIB_SENTINEL
  };
  
@@@ -129,6 -131,7 +131,7 @@@ static const struct snmp_mib snmp6_udp6
  	SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
  	SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
  	SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
+ 	SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
  	SNMP_MIB_SENTINEL
  };
  
@@@ -139,6 -142,7 +142,7 @@@ static const struct snmp_mib snmp6_udpl
  	SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
  	SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
  	SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
+ 	SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS),
  	SNMP_MIB_SENTINEL
  };
  
@@@ -247,7 -251,7 +251,7 @@@ static int snmp6_dev_seq_show(struct se
  
  static int snmp6_dev_seq_open(struct inode *inode, struct file *file)
  {
 -	return single_open(file, snmp6_dev_seq_show, PDE(inode)->data);
 +	return single_open(file, snmp6_dev_seq_show, PDE_DATA(inode));
  }
  
  static const struct file_operations snmp6_dev_seq_fops = {
@@@ -287,7 -291,8 +291,7 @@@ int snmp6_unregister_dev(struct inet6_d
  		return -ENOENT;
  	if (!idev->stats.proc_dir_entry)
  		return -EINVAL;
 -	remove_proc_entry(idev->stats.proc_dir_entry->name,
 -			  net->mib.proc_net_devsnmp6);
 +	proc_remove(idev->stats.proc_dir_entry);
  	idev->stats.proc_dir_entry = NULL;
  	return 0;
  }
diff --combined net/mac802154/mac802154.h
index 703c121,5c9e021..d48422e
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@@ -88,9 -88,7 +88,7 @@@ struct mac802154_sub_if_data 
  
  #define mac802154_to_priv(_hw)	container_of(_hw, struct mac802154_priv, hw)
  
- #define MAC802154_MAX_XMIT_ATTEMPTS	3
- 
 -#define MAC802154_CHAN_NONE		(~(u8)0) /* No channel is assigned */
 +#define MAC802154_CHAN_NONE		0xff /* No channel is assigned */
  
  extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
  extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
@@@ -114,5 -112,6 +112,6 @@@ void mac802154_dev_set_ieee_addr(struc
  u16 mac802154_dev_get_pan_id(const struct net_device *dev);
  void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
+ u8 mac802154_dev_get_dsn(const struct net_device *dev);
  
  #endif /* MAC802154_H */
diff --combined net/netfilter/ipvs/ip_vs_pe_sip.c
index e5920fb,9a8f421..9ef22bd
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@@ -13,7 -13,8 +13,8 @@@ static const char *ip_vs_dbg_callid(cha
  				    const char *callid, size_t callid_len,
  				    int *idx)
  {
- 	size_t len = min(min(callid_len, (size_t)64), buf_len - *idx - 1);
+ 	size_t max_len = 64;
+ 	size_t len = min3(max_len, callid_len, buf_len - *idx - 1);
  	memcpy(buf + *idx, callid, len);
  	buf[*idx+len] = '\0';
  	*idx += len + 1;
@@@ -37,10 -38,14 +38,10 @@@ static int get_callid(const char *dptr
  		if (ret > 0)
  			break;
  		if (!ret)
 -			return 0;
 +			return -EINVAL;
  		dataoff += *matchoff;
  	}
  
 -	/* Empty callid is useless */
 -	if (!*matchlen)
 -		return -EINVAL;
 -
  	/* Too large is useless */
  	if (*matchlen > IP_VS_PEDATA_MAXLEN)
  		return -EINVAL;
@@@ -168,6 -173,7 +169,7 @@@ static int __init ip_vs_sip_init(void
  static void __exit ip_vs_sip_cleanup(void)
  {
  	unregister_ip_vs_pe(&ip_vs_sip_pe);
+ 	synchronize_rcu();
  }
  
  module_init(ip_vs_sip_init);
diff --combined net/netfilter/x_tables.c
index 67fb7bf,1a73b18..8b03028
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@@ -2,6 -2,7 +2,7 @@@
   * x_tables core - Backend for {ip,ip6,arp}_tables
   *
   * Copyright (C) 2006-2006 Harald Welte <laforge at netfilter.org>
+  * Copyright (C) 2006-2012 Patrick McHardy <kaber at trash.net>
   *
   * Based on existing ip_tables code which is
   *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
@@@ -999,7 -1000,7 +1000,7 @@@ static int xt_table_open(struct inode *
  			   sizeof(struct xt_names_priv));
  	if (!ret) {
  		priv = ((struct seq_file *)file->private_data)->private;
 -		priv->af = (unsigned long)PDE(inode)->data;
 +		priv->af = (unsigned long)PDE_DATA(inode);
  	}
  	return ret;
  }
@@@ -1147,7 -1148,7 +1148,7 @@@ static int xt_match_open(struct inode *
  
  	seq = file->private_data;
  	seq->private = trav;
 -	trav->nfproto = (unsigned long)PDE(inode)->data;
 +	trav->nfproto = (unsigned long)PDE_DATA(inode);
  	return 0;
  }
  
@@@ -1211,7 -1212,7 +1212,7 @@@ static int xt_target_open(struct inode 
  
  	seq = file->private_data;
  	seq->private = trav;
 -	trav->nfproto = (unsigned long)PDE(inode)->data;
 +	trav->nfproto = (unsigned long)PDE_DATA(inode);
  	return 0;
  }
  
diff --combined net/netfilter/xt_hashlimit.c
index 905c328,0199e7b..9ff035c
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@@ -3,6 -3,7 +3,7 @@@
   *	separately for each hashbucket (sourceip/sourceport/dstip/dstport)
   *
   *	(C) 2003-2004 by Harald Welte <laforge at netfilter.org>
+  *	(C) 2006-2012 Patrick McHardy <kaber at trash.net>
   *	Copyright © CC Computer Consultants GmbH, 2007 - 2008
   *
   * Development of this code was funded by Astaro AG, http://www.astaro.com/
@@@ -107,7 -108,6 +108,7 @@@ struct xt_hashlimit_htable 
  
  	/* seq_file stuff */
  	struct proc_dir_entry *pde;
 +	const char *name;
  	struct net *net;
  
  	struct hlist_head hash[0];	/* hashtable itself */
@@@ -254,11 -254,6 +255,11 @@@ static int htable_create(struct net *ne
  	hinfo->count = 0;
  	hinfo->family = family;
  	hinfo->rnd_initialized = false;
 +	hinfo->name = kstrdup(minfo->name, GFP_KERNEL);
 +	if (!hinfo->name) {
 +		vfree(hinfo);
 +		return -ENOMEM;
 +	}
  	spin_lock_init(&hinfo->lock);
  
  	hinfo->pde = proc_create_data(minfo->name, 0,
@@@ -266,7 -261,6 +267,7 @@@
  		hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
  		&dl_file_ops, hinfo);
  	if (hinfo->pde == NULL) {
 +		kfree(hinfo->name);
  		vfree(hinfo);
  		return -ENOMEM;
  	}
@@@ -337,10 -331,9 +338,10 @@@ static void htable_destroy(struct xt_ha
  		parent = hashlimit_net->ip6t_hashlimit;
  
  	if(parent != NULL)
 -		remove_proc_entry(hinfo->pde->name, parent);
 +		remove_proc_entry(hinfo->name, parent);
  
  	htable_selective_cleanup(hinfo, select_all);
 +	kfree(hinfo->name);
  	vfree(hinfo);
  }
  
@@@ -352,7 -345,7 +353,7 @@@ static struct xt_hashlimit_htable *htab
  	struct xt_hashlimit_htable *hinfo;
  
  	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
 -		if (!strcmp(name, hinfo->pde->name) &&
 +		if (!strcmp(name, hinfo->name) &&
  		    hinfo->family == family) {
  			hinfo->use++;
  			return hinfo;
@@@ -849,7 -842,7 +850,7 @@@ static int dl_proc_open(struct inode *i
  
  	if (!ret) {
  		struct seq_file *sf = file->private_data;
 -		sf->private = PDE(inode)->data;
 +		sf->private = PDE_DATA(inode);
  	}
  	return ret;
  }
@@@ -895,7 -888,7 +896,7 @@@ static void __net_exit hashlimit_proc_n
  		pde = hashlimit_net->ip6t_hashlimit;
  
  	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
 -		remove_proc_entry(hinfo->pde->name, pde);
 +		remove_proc_entry(hinfo->name, pde);
  
  	hashlimit_net->ipt_hashlimit = NULL;
  	hashlimit_net->ip6t_hashlimit = NULL;
diff --combined net/nfc/llcp/sock.c
index e163157,d6faa47..b7bd8a8
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@@ -223,6 -223,156 +223,156 @@@ error
  	return ret;
  }
  
+ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+ 			       char __user *optval, unsigned int optlen)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ 	u32 opt;
+ 	int err = 0;
+ 
+ 	pr_debug("%p optname %d\n", sk, optname);
+ 
+ 	if (level != SOL_NFC)
+ 		return -ENOPROTOOPT;
+ 
+ 	lock_sock(sk);
+ 
+ 	switch (optname) {
+ 	case NFC_LLCP_RW:
+ 		if (sk->sk_state == LLCP_CONNECTED ||
+ 		    sk->sk_state == LLCP_BOUND ||
+ 		    sk->sk_state == LLCP_LISTEN) {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+ 
+ 		if (get_user(opt, (u32 __user *) optval)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
+ 
+ 		if (opt > LLCP_MAX_RW) {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+ 
+ 		llcp_sock->rw = (u8) opt;
+ 
+ 		break;
+ 
+ 	case NFC_LLCP_MIUX:
+ 		if (sk->sk_state == LLCP_CONNECTED ||
+ 		    sk->sk_state == LLCP_BOUND ||
+ 		    sk->sk_state == LLCP_LISTEN) {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+ 
+ 		if (get_user(opt, (u32 __user *) optval)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
+ 
+ 		if (opt > LLCP_MAX_MIUX) {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+ 
+ 		llcp_sock->miux = cpu_to_be16((u16) opt);
+ 
+ 		break;
+ 
+ 	default:
+ 		err = -ENOPROTOOPT;
+ 		break;
+ 	}
+ 
+ 	release_sock(sk);
+ 
+ 	pr_debug("%p rw %d miux %d\n", llcp_sock,
+ 		 llcp_sock->rw, llcp_sock->miux);
+ 
+ 	return err;
+ }
+ 
+ static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname,
+ 			       char __user *optval, int __user *optlen)
+ {
+ 	struct nfc_llcp_local *local;
+ 	struct sock *sk = sock->sk;
+ 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ 	int len, err = 0;
+ 	u16 miux, remote_miu;
+ 	u8 rw;
+ 
+ 	pr_debug("%p optname %d\n", sk, optname);
+ 
+ 	if (level != SOL_NFC)
+ 		return -ENOPROTOOPT;
+ 
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
+ 	local = llcp_sock->local;
+ 	if (!local)
+ 		return -ENODEV;
+ 
+ 	len = min_t(u32, len, sizeof(u32));
+ 
+ 	lock_sock(sk);
+ 
+ 	switch (optname) {
+ 	case NFC_LLCP_RW:
+ 		rw = llcp_sock->rw > LLCP_MAX_RW ? local->rw : llcp_sock->rw;
+ 		if (put_user(rw, (u32 __user *) optval))
+ 			err = -EFAULT;
+ 
+ 		break;
+ 
+ 	case NFC_LLCP_MIUX:
+ 		miux = be16_to_cpu(llcp_sock->miux) > LLCP_MAX_MIUX ?
+ 			be16_to_cpu(local->miux) : be16_to_cpu(llcp_sock->miux);
+ 
+ 		if (put_user(miux, (u32 __user *) optval))
+ 			err = -EFAULT;
+ 
+ 		break;
+ 
+ 	case NFC_LLCP_REMOTE_MIU:
+ 		remote_miu = llcp_sock->remote_miu > LLCP_MAX_MIU ?
+ 				local->remote_miu : llcp_sock->remote_miu;
+ 
+ 		if (put_user(remote_miu, (u32 __user *) optval))
+ 			err = -EFAULT;
+ 
+ 		break;
+ 
+ 	case NFC_LLCP_REMOTE_LTO:
+ 		if (put_user(local->remote_lto / 10, (u32 __user *) optval))
+ 			err = -EFAULT;
+ 
+ 		break;
+ 
+ 	case NFC_LLCP_REMOTE_RW:
+ 		if (put_user(llcp_sock->remote_rw, (u32 __user *) optval))
+ 			err = -EFAULT;
+ 
+ 		break;
+ 
+ 	default:
+ 		err = -ENOPROTOOPT;
+ 		break;
+ 	}
+ 
+ 	release_sock(sk);
+ 
+ 	if (put_user(len, optlen))
+ 		return -EFAULT;
+ 
+ 	return err;
+ }
+ 
  void nfc_llcp_accept_unlink(struct sock *sk)
  {
  	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@@ -358,13 -508,12 +508,13 @@@ static int llcp_sock_getname(struct soc
  	pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
  		 llcp_sock->dsap, llcp_sock->ssap);
  
 -	uaddr->sa_family = AF_NFC;
 -
 +	memset(llcp_addr, 0, sizeof(*llcp_addr));
  	*len = sizeof(struct sockaddr_nfc_llcp);
  
 +	llcp_addr->sa_family = AF_NFC;
  	llcp_addr->dev_idx = llcp_sock->dev->idx;
  	llcp_addr->target_idx = llcp_sock->target_idx;
 +	llcp_addr->nfc_protocol = llcp_sock->nfc_protocol;
  	llcp_addr->dsap = llcp_sock->dsap;
  	llcp_addr->ssap = llcp_sock->ssap;
  	llcp_addr->service_name_len = llcp_sock->service_name_len;
@@@ -406,7 -555,8 +556,8 @@@ static unsigned int llcp_sock_poll(stru
  		return llcp_accept_poll(sk);
  
  	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- 		mask |= POLLERR;
+ 		mask |= POLLERR |
+ 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
  
  	if (!skb_queue_empty(&sk->sk_receive_queue))
  		mask |= POLLIN | POLLRDNORM;
@@@ -544,7 -694,7 +695,7 @@@ static int llcp_sock_connect(struct soc
  
  	llcp_sock->dev = dev;
  	llcp_sock->local = nfc_llcp_local_get(local);
- 	llcp_sock->miu = llcp_sock->local->remote_miu;
+ 	llcp_sock->remote_miu = llcp_sock->local->remote_miu;
  	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
  	if (llcp_sock->ssap == LLCP_SAP_MAX) {
  		ret = -ENOMEM;
@@@ -741,8 -891,8 +892,8 @@@ static const struct proto_ops llcp_sock
  	.ioctl          = sock_no_ioctl,
  	.listen         = llcp_sock_listen,
  	.shutdown       = sock_no_shutdown,
- 	.setsockopt     = sock_no_setsockopt,
- 	.getsockopt     = sock_no_getsockopt,
+ 	.setsockopt     = nfc_llcp_setsockopt,
+ 	.getsockopt     = nfc_llcp_getsockopt,
  	.sendmsg        = llcp_sock_sendmsg,
  	.recvmsg        = llcp_sock_recvmsg,
  	.mmap           = sock_no_mmap,
@@@ -806,12 -956,13 +957,13 @@@ struct sock *nfc_llcp_sock_alloc(struc
  
  	llcp_sock->ssap = 0;
  	llcp_sock->dsap = LLCP_SAP_SDP;
- 	llcp_sock->rw = LLCP_DEFAULT_RW;
- 	llcp_sock->miu = LLCP_DEFAULT_MIU;
+ 	llcp_sock->rw = LLCP_MAX_RW + 1;
+ 	llcp_sock->miux = cpu_to_be16(LLCP_MAX_MIUX + 1);
  	llcp_sock->send_n = llcp_sock->send_ack_n = 0;
  	llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
  	llcp_sock->remote_ready = 1;
  	llcp_sock->reserved_ssap = LLCP_SAP_MAX;
+ 	nfc_llcp_socket_remote_param_init(llcp_sock);
  	skb_queue_head_init(&llcp_sock->tx_queue);
  	skb_queue_head_init(&llcp_sock->tx_pending_queue);
  	INIT_LIST_HEAD(&llcp_sock->accept_queue);
diff --combined net/socket.c
index 9663df6,280283f..b416093
--- a/net/socket.c
+++ b/net/socket.c
@@@ -600,7 -600,7 +600,7 @@@ void sock_release(struct socket *sock
  }
  EXPORT_SYMBOL(sock_release);
  
- int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
+ void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
  {
  	*tx_flags = 0;
  	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
@@@ -609,7 -609,6 +609,6 @@@
  		*tx_flags |= SKBTX_SW_TSTAMP;
  	if (sock_flag(sk, SOCK_WIFI_STATUS))
  		*tx_flags |= SKBTX_WIFI_STATUS;
- 	return 0;
  }
  EXPORT_SYMBOL(sock_tx_timestamp);
  
@@@ -682,16 -681,6 +681,6 @@@ int kernel_sendmsg(struct socket *sock
  }
  EXPORT_SYMBOL(kernel_sendmsg);
  
- static int ktime2ts(ktime_t kt, struct timespec *ts)
- {
- 	if (kt.tv64) {
- 		*ts = ktime_to_timespec(kt);
- 		return 1;
- 	} else {
- 		return 0;
- 	}
- }
- 
  /*
   * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
   */
@@@ -724,17 -713,15 +713,15 @@@ void __sock_recv_timestamp(struct msghd
  
  
  	memset(ts, 0, sizeof(ts));
- 	if (skb->tstamp.tv64 &&
- 	    sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) {
- 		skb_get_timestampns(skb, ts + 0);
+ 	if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) &&
+ 	    ktime_to_timespec_cond(skb->tstamp, ts + 0))
  		empty = 0;
- 	}
  	if (shhwtstamps) {
  		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
- 		    ktime2ts(shhwtstamps->syststamp, ts + 1))
+ 		    ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1))
  			empty = 0;
  		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
- 		    ktime2ts(shhwtstamps->hwtstamp, ts + 2))
+ 		    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
  			empty = 0;
  	}
  	if (!empty)
@@@ -1173,6 -1160,15 +1160,6 @@@ static int sock_mmap(struct file *file
  
  static int sock_close(struct inode *inode, struct file *filp)
  {
 -	/*
 -	 *      It was possible the inode is NULL we were
 -	 *      closing an unfinished socket.
 -	 */
 -
 -	if (!inode) {
 -		printk(KERN_DEBUG "sock_close: NULL inode\n");
 -		return 0;
 -	}
  	sock_release(SOCKET_I(inode));
  	return 0;
  }

-- 
LinuxNextTracking

