[linux-next] LinuxNextTracking branch, master, updated. next-20140106

batman at open-mesh.org batman at open-mesh.org
Tue Jan 7 00:19:23 CET 2014


The following commit has been merged in the master branch:
commit a28025b73b895491d24de593eaf292e6f1973467
Merge: 1efe709fc5b39e2bba2203d80676ce578ed61637 a1d4b03a076d95edc88d070f7627a73ab80abddc
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Mon Jan 6 15:34:14 2014 +1100

    Merge remote-tracking branch 'net-next/master'
    
    Conflicts:
    	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
    	net/ipv6/ip6_tunnel.c
    	net/ipv6/ip6_vti.c

diff --combined MAINTAINERS
index 19da3e2,23bd3c2..2a047d4
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -783,7 -783,7 +783,7 @@@ F:	arch/arm/boot/dts/sama*.dt
  F:	arch/arm/boot/dts/sama*.dtsi
  
  ARM/CALXEDA HIGHBANK ARCHITECTURE
 -M:	Rob Herring <rob.herring at calxeda.com>
 +M:	Rob Herring <robh at kernel.org>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	arch/arm/mach-highbank/
@@@ -867,12 -867,6 +867,12 @@@ S:	Maintaine
  F:	arch/arm/mach-ebsa110/
  F:	drivers/net/ethernet/amd/am79c961a.*
  
 +ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
 +M:	Uwe Kleine-König <kernel at pengutronix.de>
 +L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
 +S:	Maintained
 +N:	efm32
 +
  ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
  M:	Daniel Ribeiro <drwyrm at gmail.com>
  M:	Stefan Schmidt <stefan at openezx.org>
@@@ -1014,8 -1008,6 +1014,8 @@@ M:	Santosh Shilimkar <santosh.shilimkar
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	arch/arm/mach-keystone/
 +F:	drivers/clk/keystone/
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
  
  ARM/LOGICPD PXA270 MACHINE SUPPORT
  M:	Lennert Buytenhek <kernel at wantstofly.org>
@@@ -1035,12 -1027,6 +1035,12 @@@ L:	linux-arm-kernel at lists.infradead.or
  S:	Maintained
  F:	arch/arm/mach-mvebu/
  
 +ARM/Marvell Berlin SoC support
 +M:	Sebastian Hesselbarth <sebastian.hesselbarth at gmail.com>
 +L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
 +S:	Maintained
 +F:	arch/arm/mach-berlin/
 +
  ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
  M:	Jason Cooper <jason at lakedaemon.net>
  M:	Andrew Lunn <andrew at lunn.ch>
@@@ -1380,9 -1366,6 +1380,9 @@@ T:	git git://git.xilinx.com/linux-xlnx.
  S:	Supported
  F:	arch/arm/mach-zynq/
  F:	drivers/cpuidle/cpuidle-zynq.c
 +N:	zynq
 +N:	xilinx
 +F:	drivers/clocksource/cadence_ttc_timer.c
  
  ARM SMMU DRIVER
  M:	Will Deacon <will.deacon at arm.com>
@@@ -1447,7 -1430,7 +1447,7 @@@ F:	Documentation/aoe
  F:	drivers/block/aoe/
  
  ATHEROS ATH GENERIC UTILITIES
- M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
+ M:	"Luis R. Rodriguez" <mcgrof at do-not-panic.com>
  L:	linux-wireless at vger.kernel.org
  S:	Supported
  F:	drivers/net/wireless/ath/*
@@@ -1455,7 -1438,7 +1455,7 @@@
  ATHEROS ATH5K WIRELESS DRIVER
  M:	Jiri Slaby <jirislaby at gmail.com>
  M:	Nick Kossifidis <mickflemm at gmail.com>
- M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
+ M:	"Luis R. Rodriguez" <mcgrof at do-not-panic.com>
  L:	linux-wireless at vger.kernel.org
  L:	ath5k-devel at lists.ath5k.org
  W:	http://wireless.kernel.org/en/users/Drivers/ath5k
@@@ -1470,17 -1453,6 +1470,6 @@@ T:	git git://github.com/kvalo/ath.gi
  S:	Supported
  F:	drivers/net/wireless/ath/ath6kl/
  
- ATHEROS ATH9K WIRELESS DRIVER
- M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
- M:	Jouni Malinen <jouni at qca.qualcomm.com>
- M:	Vasanthakumar Thiagarajan <vthiagar at qca.qualcomm.com>
- M:	Senthil Balasubramanian <senthilb at qca.qualcomm.com>
- L:	linux-wireless at vger.kernel.org
- L:	ath9k-devel at lists.ath9k.org
- W:	http://wireless.kernel.org/en/users/Drivers/ath9k
- S:	Supported
- F:	drivers/net/wireless/ath/ath9k/
- 
  WILOCITY WIL6210 WIRELESS DRIVER
  M:	Vladimir Kondratiev <qca_vkondrat at qca.qualcomm.com>
  L:	linux-wireless at vger.kernel.org
@@@ -2036,6 -2008,7 +2025,7 @@@ L:	linux-can at vger.kernel.or
  W:	http://gitorious.org/linux-can
  T:	git git://gitorious.org/linux-can/linux-can-next.git
  S:	Maintained
+ F:	Documentation/networking/can.txt
  F:	net/can/
  F:	include/linux/can/core.h
  F:	include/uapi/linux/can.h
@@@ -3850,12 -3823,6 +3840,12 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/usb/gspca/
  
 +GUID PARTITION TABLE (GPT)
 +M:	Davidlohr Bueso <davidlohr at hp.com>
 +L:	linux-efi at vger.kernel.org
 +S:	Maintained
 +F:	block/partitions/efi.*
 +
  STK1160 USB VIDEO CAPTURE DRIVER
  M:	Ezequiel Garcia <elezegarcia at gmail.com>
  L:	linux-media at vger.kernel.org
@@@ -4484,7 -4451,7 +4474,7 @@@ M:	Deepak Saxena <dsaxena at plexity.net
  S:	Maintained
  F:	drivers/char/hw_random/ixp4xx-rng.c
  
- INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
+ INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
  M:	Jeff Kirsher <jeffrey.t.kirsher at intel.com>
  M:	Jesse Brandeburg <jesse.brandeburg at intel.com>
  M:	Bruce Allan <bruce.w.allan at intel.com>
@@@ -4493,6 -4460,7 +4483,7 @@@ M:	Don Skidmore <donald.c.skidmore at inte
  M:	Greg Rose <gregory.v.rose at intel.com>
  M:	Alex Duyck <alexander.h.duyck at intel.com>
  M:	John Ronciak <john.ronciak at intel.com>
+ M:	Mitch Williams <mitch.a.williams at intel.com>
  L:	e1000-devel at lists.sourceforge.net
  W:	http://www.intel.com/support/feedback.htm
  W:	http://e1000.sourceforge.net/
@@@ -4508,6 -4476,7 +4499,7 @@@ F:	Documentation/networking/ixgb.tx
  F:	Documentation/networking/ixgbe.txt
  F:	Documentation/networking/ixgbevf.txt
  F:	Documentation/networking/i40e.txt
+ F:	Documentation/networking/i40evf.txt
  F:	drivers/net/ethernet/intel/
  
  INTEL-MID GPIO DRIVER
@@@ -5386,16 -5355,6 +5378,16 @@@ W:	http://www.tazenda.demon.co.uk/phil/
  S:	Maintained
  F:	arch/m68k/hp300/
  
 +M88DS3103 MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/dvb-frontends/m88ds3103*
 +
  M88RS2000 MEDIA DRIVER
  M:	Malcolm Priestley <tvboxspy at gmail.com>
  L:	linux-media at vger.kernel.org
@@@ -5404,16 -5363,6 +5396,16 @@@ Q:	http://patchwork.linuxtv.org/project
  S:	Maintained
  F:	drivers/media/dvb-frontends/m88rs2000*
  
 +M88TS2022 MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/tuners/m88ts2022*
 +
  MA901 MASTERKIT USB FM RADIO DRIVER
  M:      Alexey Klimov <klimov.linux at gmail.com>
  L:      linux-media at vger.kernel.org
@@@ -6291,7 -6240,7 +6283,7 @@@ F:	drivers/i2c/busses/i2c-ocores.
  
  OPEN FIRMWARE AND FLATTENED DEVICE TREE
  M:	Grant Likely <grant.likely at linaro.org>
 -M:	Rob Herring <rob.herring at calxeda.com>
 +M:	Rob Herring <robh+dt at kernel.org>
  L:	devicetree at vger.kernel.org
  W:	http://fdt.secretlab.ca
  T:	git git://git.secretlab.ca/git/linux-2.6.git
@@@ -6303,7 -6252,7 +6295,7 @@@ K:	of_get_propert
  K:	of_match_table
  
  OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 -M:	Rob Herring <rob.herring at calxeda.com>
 +M:	Rob Herring <robh+dt at kernel.org>
  M:	Pawel Moll <pawel.moll at arm.com>
  M:	Mark Rutland <mark.rutland at arm.com>
  M:	Ian Campbell <ijc+devicetree at hellion.org.uk>
@@@ -7018,6 -6967,14 +7010,14 @@@ T:	git git://linuxtv.org/anttip/media_t
  S:	Maintained
  F:	drivers/media/tuners/qt1010*
  
+ QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
+ M:	QCA ath9k Development <ath9k-devel at qca.qualcomm.com>
+ L:	linux-wireless at vger.kernel.org
+ L:	ath9k-devel at lists.ath9k.org
+ W:	http://wireless.kernel.org/en/users/Drivers/ath9k
+ S:	Supported
+ F:	drivers/net/wireless/ath/ath9k/
+ 
  QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
  M:	Kalle Valo <kvalo at qca.qualcomm.com>
  L:	ath10k at lists.infradead.org
@@@ -7427,13 -7384,6 +7427,13 @@@ L:	linux-media at vger.kernel.or
  S:	Supported
  F:	drivers/media/i2c/s5c73m3/*
  
 +SAMSUNG S5K5BAF CAMERA DRIVER
 +M:	Kyungmin Park <kyungmin.park at samsung.com>
 +M:	Andrzej Hajda <a.hajda at samsung.com>
 +L:	linux-media at vger.kernel.org
 +S:	Supported
 +F:	drivers/media/i2c/s5k5baf.c
 +
  SERIAL DRIVERS
  M:	Greg Kroah-Hartman <gregkh at linuxfoundation.org>
  L:	linux-serial at vger.kernel.org
@@@ -7728,7 -7678,7 +7728,7 @@@ L:	linux-media at vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  W:	http://linuxtv.org
  S:	Odd Fixes
 -F:	drivers/media/radio/si4713-i2c.?
 +F:	drivers/media/radio/si4713/si4713.?
  
  SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER
  M:	Eduardo Valentin <edubezval at gmail.com>
@@@ -7736,15 -7686,7 +7736,15 @@@ L:	linux-media at vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  W:	http://linuxtv.org
  S:	Odd Fixes
 -F:	drivers/media/radio/radio-si4713.c
 +F:	drivers/media/radio/si4713/radio-platform-si4713.c
 +
 +SI4713 FM RADIO TRANSMITTER USB DRIVER
 +M:	Hans Verkuil <hverkuil at xs4all.nl>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +W:	http://linuxtv.org
 +S:	Maintained
 +F:	drivers/media/radio/si4713/radio-usb-si4713.c
  
  SIANO DVB DRIVER
  M:	Mauro Carvalho Chehab <m.chehab at samsung.com>
@@@ -8595,14 -8537,6 +8595,14 @@@ L:	linux-xtensa at linux-xtensa.or
  S:	Maintained
  F:	arch/xtensa/
  
 +THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
 +M:	Hans Verkuil <hverkuil at xs4all.nl>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +W:	http://linuxtv.org
 +S:	Maintained
 +F:	drivers/media/radio/radio-raremono.c
 +
  THERMAL
  M:      Zhang Rui <rui.zhang at intel.com>
  M:      Eduardo Valentin <eduardo.valentin at ti.com>
@@@ -8614,7 -8548,6 +8614,7 @@@ S:      Supporte
  F:      drivers/thermal/
  F:      include/linux/thermal.h
  F:      include/linux/cpu_cooling.h
 +F:      Documentation/devicetree/bindings/thermal/
  
  THINGM BLINK(1) USB RGB LED DRIVER
  M:	Vivien Didelot <vivien.didelot at savoirfairelinux.com>
@@@ -8682,12 -8615,11 +8682,11 @@@ S:	Maintaine
  F:	sound/soc/codecs/twl4030*
  
  TI WILINK WIRELESS DRIVERS
- M:	Luciano Coelho <luca at coelho.fi>
  L:	linux-wireless at vger.kernel.org
  W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
  W:	http://wireless.kernel.org/en/users/Drivers/wl1251
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
- S:	Maintained
+ S:	Orphan
  F:	drivers/net/wireless/ti/
  F:	include/linux/wl12xx.h
  
@@@ -9141,7 -9073,8 +9140,7 @@@ L:	linux-media at vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  W:	http://www.linux-projects.org
  S:	Maintained
 -F:	Documentation/video4linux/sn9c102.txt
 -F:	drivers/media/usb/sn9c102/
 +F:	drivers/staging/media/sn9c102/
  
  USB SUBSYSTEM
  M:	Greg Kroah-Hartman <gregkh at linuxfoundation.org>
@@@ -9648,7 -9581,7 +9647,7 @@@ F:	drivers/xen/*swiotlb
  
  XFS FILESYSTEM
  P:	Silicon Graphics Inc
 -M:	Dave Chinner <dchinner at fromorbit.com>
 +M:	Dave Chinner <david at fromorbit.com>
  M:	Ben Myers <bpm at sgi.com>
  M:	xfs at oss.sgi.com
  L:	xfs at oss.sgi.com
diff --combined arch/arm64/include/asm/Kbuild
index d0ff25d,626d4a9..71c53ec
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@@ -26,6 -26,7 +26,6 @@@ generic-y += mman.
  generic-y += msgbuf.h
  generic-y += mutex.h
  generic-y += pci.h
 -generic-y += percpu.h
  generic-y += poll.h
  generic-y += posix_types.h
  generic-y += resource.h
@@@ -50,3 -51,4 +50,4 @@@ generic-y += user.
  generic-y += vga.h
  generic-y += xor.h
  generic-y += preempt.h
+ generic-y += hash.h
diff --combined arch/xtensa/include/asm/Kbuild
index 5851db2,d7efa15..0a337e4
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@@ -8,6 -8,7 +8,6 @@@ generic-y += emergency-restart.
  generic-y += errno.h
  generic-y += exec.h
  generic-y += fcntl.h
 -generic-y += futex.h
  generic-y += hardirq.h
  generic-y += ioctl.h
  generic-y += irq_regs.h
@@@ -28,3 -29,4 +28,4 @@@ generic-y += topology.
  generic-y += trace_clock.h
  generic-y += xor.h
  generic-y += preempt.h
+ generic-y += hash.h
diff --combined drivers/bluetooth/ath3k.c
index dceb85f,d3fdc32..106d1d8
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@@ -83,11 -83,11 +83,12 @@@ static const struct usb_device_id ath3k
  	{ USB_DEVICE(0x04CA, 0x3005) },
  	{ USB_DEVICE(0x04CA, 0x3006) },
  	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
  	{ USB_DEVICE(0x13d3, 0x3362) },
  	{ USB_DEVICE(0x0CF3, 0xE004) },
  	{ USB_DEVICE(0x0CF3, 0xE005) },
  	{ USB_DEVICE(0x0930, 0x0219) },
 +	{ USB_DEVICE(0x0930, 0x0220) },
  	{ USB_DEVICE(0x0489, 0xe057) },
  	{ USB_DEVICE(0x13d3, 0x3393) },
  	{ USB_DEVICE(0x0489, 0xe04e) },
@@@ -97,6 -97,7 +98,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402) },
  	{ USB_DEVICE(0x0cf3, 0x3121) },
  	{ USB_DEVICE(0x0cf3, 0xe003) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
  
  	/* Atheros AR5BBU12 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xE02C) },
@@@ -126,11 -127,11 +128,12 @@@ static const struct usb_device_id ath3k
  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@@ -140,6 -141,7 +143,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
  
  	/* Atheros AR5BBU22 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --combined drivers/bluetooth/btusb.c
index 3980fd1,bfbcc5a..9f7e539
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@@ -150,11 -150,11 +150,12 @@@ static const struct usb_device_id black
  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 +	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@@ -164,6 -164,7 +165,7 @@@
  	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
  	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
  
  	/* Atheros AR5BBU12 with sflash firmware */
  	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@@ -224,6 -225,7 +226,7 @@@
  
  	/* Intel Bluetooth device */
  	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
  
  	{ }	/* Terminating entry */
  };
@@@ -1436,8 -1438,10 +1439,10 @@@ static int btusb_probe(struct usb_inter
  	if (id->driver_info & BTUSB_BCM92035)
  		hdev->setup = btusb_setup_bcm92035;
  
- 	if (id->driver_info & BTUSB_INTEL)
+ 	if (id->driver_info & BTUSB_INTEL) {
+ 		usb_enable_autosuspend(data->udev);
  		hdev->setup = btusb_setup_intel;
+ 	}
  
  	/* Interface numbers are hardcoded in the specification */
  	data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --combined drivers/net/bonding/bond_3ad.c
index 4ced594,81559b2..539e24a
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@@ -90,8 -90,9 +90,9 @@@
  #define     AD_LINK_SPEED_BITMASK_10000MBPS   0x10
  //endalloun
  
- // compare MAC addresses
- #define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
+ /* compare MAC addresses */
+ #define MAC_ADDRESS_EQUAL(A, B)	\
+ 	ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
  
  static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
  static u16 ad_ticks_per_sec;
@@@ -147,11 -148,12 +148,12 @@@ static inline struct aggregator *__get_
  	struct bonding *bond = __get_bond_by_port(port);
  	struct slave *first_slave;
  
- 	// If there's no bond for this port, or bond has no slaves
+ 	/* If there's no bond for this port, or bond has no slaves */
  	if (bond == NULL)
  		return NULL;
- 	first_slave = bond_first_slave(bond);
- 
+ 	rcu_read_lock();
+ 	first_slave = bond_first_slave_rcu(bond);
+ 	rcu_read_unlock();
  	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
  }
  
@@@ -416,17 -418,18 +418,18 @@@ static u16 __ad_timer_to_ticks(u16 time
   */
  static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
  {
- 	// check if all parameters are alike
+ 	/* check if all parameters are alike
+ 	 * or this is individual link(aggregation == FALSE)
+ 	 * then update the state machine Matched variable.
+ 	 */
  	if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
  	     (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
- 	     !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+ 	     MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
  	     (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
  	     (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
  	     ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- 	    // or this is individual link(aggregation == FALSE)
  	    ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
  		) {
- 		// update the state machine Matched variable
  		port->sm_vars |= AD_PORT_MATCHED;
  	} else {
  		port->sm_vars &= ~AD_PORT_MATCHED;
@@@ -506,14 -509,15 +509,15 @@@ static void __update_selected(struct la
  	if (lacpdu && port) {
  		const struct port_params *partner = &port->partner_oper;
  
- 		// check if any parameter is different
+ 		/* check if any parameter is different then
+ 		 * update the state machine selected variable.
+ 		 */
  		if (ntohs(lacpdu->actor_port) != partner->port_number ||
  		    ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
- 		    MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+ 		    !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
  		    ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
  		    ntohs(lacpdu->actor_key) != partner->key ||
  		    (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
- 			// update the state machine Selected variable
  			port->sm_vars &= ~AD_PORT_SELECTED;
  		}
  	}
@@@ -537,15 -541,16 +541,16 @@@ static void __update_default_selected(s
  		const struct port_params *admin = &port->partner_admin;
  		const struct port_params *oper = &port->partner_oper;
  
- 		// check if any parameter is different
+ 		/* check if any parameter is different then
+ 		 * update the state machine selected variable.
+ 		 */
  		if (admin->port_number != oper->port_number ||
  		    admin->port_priority != oper->port_priority ||
- 		    MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+ 		    !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
  		    admin->system_priority != oper->system_priority ||
  		    admin->key != oper->key ||
  		    (admin->port_state & AD_STATE_AGGREGATION)
  			!= (oper->port_state & AD_STATE_AGGREGATION)) {
- 			// update the state machine Selected variable
  			port->sm_vars &= ~AD_PORT_SELECTED;
  		}
  	}
@@@ -565,12 -570,14 +570,14 @@@
   */
  static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
  {
- 	// validate lacpdu and port
+ 	/* validate lacpdu and port */
  	if (lacpdu && port) {
- 		// check if any parameter is different
+ 		/* check if any parameter is different then
+ 		 * update the port->ntt.
+ 		 */
  		if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
  		    (ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
- 		    MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+ 		    !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
  		    (ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
  		    (ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
  		    ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@@ -578,7 -585,6 +585,6 @@@
  		    ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
  		    ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
  		   ) {
- 
  			port->ntt = true;
  		}
  	}
@@@ -702,9 -708,13 +708,13 @@@ static struct aggregator *__get_active_
  	struct list_head *iter;
  	struct slave *slave;
  
- 	bond_for_each_slave(bond, slave, iter)
- 		if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ 	rcu_read_lock();
+ 	bond_for_each_slave_rcu(bond, slave, iter)
+ 		if (SLAVE_AD_INFO(slave).aggregator.is_active) {
+ 			rcu_read_unlock();
  			return &(SLAVE_AD_INFO(slave).aggregator);
+ 		}
+ 	rcu_read_unlock();
  
  	return NULL;
  }
@@@ -1071,9 -1081,8 +1081,8 @@@ static void ad_rx_machine(struct lacpd
  			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
  			break;
  		case AD_RX_CURRENT:
- 			// detect loopback situation
- 			if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
- 				// INFO_RECEIVED_LOOPBACK_FRAMES
+ 			/* detect loopback situation */
+ 			if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), &(port->actor_system))) {
  				pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
  				       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
  				       port->slave->bond->dev->name, port->slave->dev->name);
@@@ -1085,7 -1094,7 +1094,7 @@@
  			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
  			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
  			break;
- 		default:    //to silence the compiler
+ 		default:    /* to silence the compiler */
  			break;
  		}
  	}
@@@ -1276,17 -1285,17 +1285,17 @@@ static void ad_port_selection_logic(str
  				free_aggregator = aggregator;
  			continue;
  		}
- 		// check if current aggregator suits us
- 		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
- 		     !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
+ 		/* check if current aggregator suits us */
+ 		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
+ 		     MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
  		     (aggregator->partner_system_priority == port->partner_oper.system_priority) &&
  		     (aggregator->partner_oper_aggregator_key == port->partner_oper.key)
  		    ) &&
- 		    ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
- 		      !aggregator->is_individual)  // but is not individual OR
+ 		    ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ 		      !aggregator->is_individual)  /* but is not individual OR */
  		    )
  		   ) {
- 			// attach to the founded aggregator
+ 			/* attach to the founded aggregator */
  			port->aggregator = aggregator;
  			port->actor_port_aggregator_identifier =
  				port->aggregator->aggregator_identifier;
@@@ -1471,7 -1480,8 +1480,8 @@@ static void ad_agg_selection_logic(stru
  	active = __get_active_agg(agg);
  	best = (active && agg_device_up(active)) ? active : NULL;
  
- 	bond_for_each_slave(bond, slave, iter) {
+ 	rcu_read_lock();
+ 	bond_for_each_slave_rcu(bond, slave, iter) {
  		agg = &(SLAVE_AD_INFO(slave).aggregator);
  
  		agg->is_active = 0;
@@@ -1505,7 -1515,7 +1515,7 @@@
  		active->is_active = 1;
  	}
  
- 	// if there is new best aggregator, activate it
+ 	/* if there is new best aggregator, activate it */
  	if (best) {
  		pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
  			 best->aggregator_identifier, best->num_of_ports,
@@@ -1516,7 -1526,7 +1526,7 @@@
  			 best->lag_ports, best->slave,
  			 best->slave ? best->slave->dev->name : "NULL");
  
- 		bond_for_each_slave(bond, slave, iter) {
+ 		bond_for_each_slave_rcu(bond, slave, iter) {
  			agg = &(SLAVE_AD_INFO(slave).aggregator);
  
  			pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@@ -1526,10 -1536,11 +1536,11 @@@
  				 agg->is_individual, agg->is_active);
  		}
  
- 		// check if any partner replys
+ 		/* check if any partner replys */
  		if (best->is_individual) {
  			pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- 				   best->slave ? best->slave->bond->dev->name : "NULL");
+ 				best->slave ?
+ 				best->slave->bond->dev->name : "NULL");
  		}
  
  		best->is_active = 1;
@@@ -1541,7 -1552,7 +1552,7 @@@
  			 best->partner_oper_aggregator_key,
  			 best->is_individual, best->is_active);
  
- 		// disable the ports that were related to the former active_aggregator
+ 		/* disable the ports that were related to the former active_aggregator */
  		if (active) {
  			for (port = active->lag_ports; port;
  			     port = port->next_port_in_aggregator) {
@@@ -1565,6 -1576,8 +1576,8 @@@
  		}
  	}
  
+ 	rcu_read_unlock();
+ 
  	bond_3ad_set_carrier(bond);
  }
  
@@@ -1696,7 -1709,7 +1709,7 @@@ static void ad_enable_collecting_distri
   */
  static void ad_disable_collecting_distributing(struct port *port)
  {
- 	if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+ 	if (port->aggregator && !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system), &(null_mac_addr))) {
  		pr_debug("Disabling port %d(LAG %d)\n",
  			 port->actor_port_number,
  			 port->aggregator->aggregator_identifier);
@@@ -1817,8 -1830,8 +1830,8 @@@ static u16 aggregator_identifier
   */
  void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
  {
- 	// check that the bond is not initialized yet
- 	if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ 	/* check that the bond is not initialized yet */
+ 	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
  				bond->dev->dev_addr)) {
  
  		aggregator_identifier = 0;
@@@ -1842,22 -1855,16 +1855,16 @@@
   * Returns:   0 on success
   *          < 0 on error
   */
- int bond_3ad_bind_slave(struct slave *slave)
+ void bond_3ad_bind_slave(struct slave *slave)
  {
  	struct bonding *bond = bond_get_bond_by_slave(slave);
  	struct port *port;
  	struct aggregator *aggregator;
  
- 	if (bond == NULL) {
- 		pr_err("%s: The slave %s is not attached to its bond\n",
- 		       slave->bond->dev->name, slave->dev->name);
- 		return -1;
- 	}
- 
- 	//check that the slave has not been initialized yet.
+ 	/* check that the slave has not been initialized yet. */
  	if (SLAVE_AD_INFO(slave).port.slave != slave) {
  
- 		// port initialization
+ 		/* port initialization */
  		port = &(SLAVE_AD_INFO(slave).port);
  
  		ad_initialize_port(port, bond->params.lacp_fast);
@@@ -1865,28 -1872,30 +1872,30 @@@
  		__initialize_port_locks(slave);
  		port->slave = slave;
  		port->actor_port_number = SLAVE_AD_INFO(slave).id;
- 		// key is determined according to the link speed, duplex and user key(which is yet not supported)
- 		//              ------------------------------------------------------------
- 		// Port key :   | User key                       |      Speed       |Duplex|
- 		//              ------------------------------------------------------------
- 		//              16                               6               1 0
- 		port->actor_admin_port_key = 0;	// initialize this parameter
+ 		/* key is determined according to the link speed, duplex and user key(which
+ 		 * is yet not supported)
+ 		 *              ------------------------------------------------------------
+ 		 * Port key :   | User key                       |      Speed       |Duplex|
+ 		 *              ------------------------------------------------------------
+ 		 *              16                               6               1 0
+ 		 */
+ 		port->actor_admin_port_key = 0;	/* initialize this parameter */
  		port->actor_admin_port_key |= __get_duplex(port);
  		port->actor_admin_port_key |= (__get_link_speed(port) << 1);
  		port->actor_oper_port_key = port->actor_admin_port_key;
- 		// if the port is not full duplex, then the port should be not lacp Enabled
+ 		/* if the port is not full duplex, then the port should be not lacp Enabled */
  		if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
  			port->sm_vars &= ~AD_PORT_LACP_ENABLED;
- 		// actor system is the bond's system
+ 		/* actor system is the bond's system */
  		port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
- 		// tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+ 		/* tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) */
  		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
  		port->aggregator = NULL;
  		port->next_port_in_aggregator = NULL;
  
  		__disable_port(port);
  
- 		// aggregator initialization
+ 		/* aggregator initialization */
  		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
  
  		ad_initialize_agg(aggregator);
@@@ -1897,8 -1906,6 +1906,6 @@@
  		aggregator->is_active = 0;
  		aggregator->num_of_ports = 0;
  	}
- 
- 	return 0;
  }
  
  /**
@@@ -2069,17 -2076,18 +2076,18 @@@ void bond_3ad_state_machine_handler(str
  	struct port *port;
  
  	read_lock(&bond->lock);
+ 	rcu_read_lock();
  
- 	//check if there are any slaves
+ 	/* check if there are any slaves */
  	if (!bond_has_slaves(bond))
  		goto re_arm;
  
- 	// check if agg_select_timer timer after initialize is timed out
+ 	/* check if agg_select_timer timer after initialize is timed out */
  	if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
- 		slave = bond_first_slave(bond);
+ 		slave = bond_first_slave_rcu(bond);
  		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
  
- 		// select the active aggregator for the bond
+ 		/* select the active aggregator for the bond */
  		if (port) {
  			if (!port->slave) {
  				pr_warning("%s: Warning: bond's first port is uninitialized\n",
@@@ -2093,8 -2101,8 +2101,8 @@@
  		bond_3ad_set_carrier(bond);
  	}
  
- 	// for each port run the state machines
- 	bond_for_each_slave(bond, slave, iter) {
+ 	/* for each port run the state machines */
+ 	bond_for_each_slave_rcu(bond, slave, iter) {
  		port = &(SLAVE_AD_INFO(slave).port);
  		if (!port->slave) {
  			pr_warning("%s: Warning: Found an uninitialized port\n",
@@@ -2114,7 -2122,7 +2122,7 @@@
  		ad_mux_machine(port);
  		ad_tx_machine(port);
  
- 		// turn off the BEGIN bit, since we already handled it
+ 		/* turn off the BEGIN bit, since we already handled it */
  		if (port->sm_vars & AD_PORT_BEGIN)
  			port->sm_vars &= ~AD_PORT_BEGIN;
  
@@@ -2122,9 -2130,9 +2130,9 @@@
  	}
  
  re_arm:
- 	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
- 
+ 	rcu_read_unlock();
  	read_unlock(&bond->lock);
+ 	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
  }
  
  /**
@@@ -2201,25 -2209,20 +2209,25 @@@ void bond_3ad_adapter_speed_changed(str
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
 -	// if slave is null, the whole port is not initialized
 +	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
 +	__get_state_machine_lock(port);
 +
  	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
  	port->actor_oper_port_key = port->actor_admin_port_key |=
  		(__get_link_speed(port) << 1);
  	pr_debug("Port %d changed speed\n", port->actor_port_number);
 -	// there is no need to reselect a new aggregator, just signal the
 -	// state machines to reinitialize
 +	/* there is no need to reselect a new aggregator, just signal the
 +	 * state machines to reinitialize
 +	 */
  	port->sm_vars |= AD_PORT_BEGIN;
 +
 +	__release_state_machine_lock(port);
  }
  
  /**
@@@ -2234,25 -2237,20 +2242,25 @@@ void bond_3ad_adapter_duplex_changed(st
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
 -	// if slave is null, the whole port is not initialized
 +	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
 +	__get_state_machine_lock(port);
 +
  	port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
  	port->actor_oper_port_key = port->actor_admin_port_key |=
  		__get_duplex(port);
  	pr_debug("Port %d changed duplex\n", port->actor_port_number);
 -	// there is no need to reselect a new aggregator, just signal the
 -	// state machines to reinitialize
 +	/* there is no need to reselect a new aggregator, just signal the
 +	 * state machines to reinitialize
 +	 */
  	port->sm_vars |= AD_PORT_BEGIN;
 +
 +	__release_state_machine_lock(port);
  }
  
  /**
@@@ -2268,21 -2266,15 +2276,21 @@@ void bond_3ad_handle_link_change(struc
  
  	port = &(SLAVE_AD_INFO(slave).port);
  
 -	// if slave is null, the whole port is not initialized
 +	/* if slave is null, the whole port is not initialized */
  	if (!port->slave) {
  		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
  			   slave->bond->dev->name, slave->dev->name);
  		return;
  	}
  
 -	// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
 -	// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
 +	__get_state_machine_lock(port);
 +	/* on link down we are zeroing duplex and speed since
 +	 * some of the adaptors(ce1000.lan) report full duplex/speed
 +	 * instead of N/A(duplex) / 0(speed).
 +	 *
 +	 * on link up we are forcing recheck on the duplex and speed since
 +	 * some of he adaptors(ce1000.lan) report.
 +	 */
  	if (link == BOND_LINK_UP) {
  		port->is_enabled = true;
  		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@@ -2298,15 -2290,10 +2306,15 @@@
  		port->actor_oper_port_key = (port->actor_admin_port_key &=
  					     ~AD_SPEED_KEY_BITS);
  	}
 -	//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
 -	// there is no need to reselect a new aggregator, just signal the
 -	// state machines to reinitialize
 +	pr_debug("Port %d changed link status to %s",
 +		port->actor_port_number,
 +		(link == BOND_LINK_UP) ? "UP" : "DOWN");
 +	/* there is no need to reselect a new aggregator, just signal the
 +	 * state machines to reinitialize
 +	 */
  	port->sm_vars |= AD_PORT_BEGIN;
 +
 +	__release_state_machine_lock(port);
  }
  
  /*
@@@ -2324,7 -2311,9 +2332,9 @@@ int bond_3ad_set_carrier(struct bondin
  	struct aggregator *active;
  	struct slave *first_slave;
  
- 	first_slave = bond_first_slave(bond);
+ 	rcu_read_lock();
+ 	first_slave = bond_first_slave_rcu(bond);
+ 	rcu_read_unlock();
  	if (!first_slave)
  		return 0;
  	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
@@@ -2406,13 -2395,12 +2416,12 @@@ int bond_3ad_xmit_xor(struct sk_buff *s
  	struct list_head *iter;
  	int slaves_in_agg;
  	int slave_agg_no;
- 	int res = 1;
  	int agg_id;
  
  	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
  		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
  			 dev->name);
- 		goto out;
+ 		goto err_free;
  	}
  
  	slaves_in_agg = ad_info.ports;
@@@ -2420,7 -2408,7 +2429,7 @@@
  
  	if (slaves_in_agg == 0) {
  		pr_debug("%s: Error: active aggregator is empty\n", dev->name);
- 		goto out;
+ 		goto err_free;
  	}
  
  	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@@ -2439,7 -2427,7 +2448,7 @@@
  		}
  
  		if (SLAVE_IS_OK(slave)) {
- 			res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ 			bond_dev_queue_xmit(bond, skb, slave->dev);
  			goto out;
  		}
  	}
@@@ -2447,21 -2435,22 +2456,22 @@@
  	if (slave_agg_no >= 0) {
  		pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
  		       dev->name, agg_id);
- 		goto out;
+ 		goto err_free;
  	}
  
  	/* we couldn't find any suitable slave after the agg_no, so use the
  	 * first suitable found, if found. */
  	if (first_ok_slave)
- 		res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ 		bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ 	else
+ 		goto err_free;
  
  out:
- 	if (res) {
- 		/* no suitable interface, frame not sent */
- 		kfree_skb(skb);
- 	}
- 
  	return NETDEV_TX_OK;
+ err_free:
+ 	/* no suitable interface, frame not sent */
+ 	kfree_skb(skb);
+ 	goto out;
  }
  
  int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
diff --combined drivers/net/can/mscan/mpc5xxx_can.c
index 6b0c995,035e235..4472529
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@@ -16,8 -16,7 +16,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include <linux/kernel.h>
@@@ -109,170 -108,135 +108,170 @@@ static u32 mpc52xx_can_get_clock(struc
  #endif /* CONFIG_PPC_MPC52xx */
  
  #ifdef CONFIG_PPC_MPC512x
 -struct mpc512x_clockctl {
 -	u32 spmr;		/* System PLL Mode Reg */
 -	u32 sccr[2];		/* System Clk Ctrl Reg 1 & 2 */
 -	u32 scfr1;		/* System Clk Freq Reg 1 */
 -	u32 scfr2;		/* System Clk Freq Reg 2 */
 -	u32 reserved;
 -	u32 bcr;		/* Bread Crumb Reg */
 -	u32 pccr[12];		/* PSC Clk Ctrl Reg 0-11 */
 -	u32 spccr;		/* SPDIF Clk Ctrl Reg */
 -	u32 cccr;		/* CFM Clk Ctrl Reg */
 -	u32 dccr;		/* DIU Clk Cnfg Reg */
 -	u32 mccr[4];		/* MSCAN Clk Ctrl Reg 1-3 */
 -};
 -
 -static struct of_device_id mpc512x_clock_ids[] = {
 -	{ .compatible = "fsl,mpc5121-clock", },
 -	{}
 -};
 -
  static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
 -				 const char *clock_name, int *mscan_clksrc)
 +				 const char *clock_source, int *mscan_clksrc)
  {
 -	struct mpc512x_clockctl __iomem *clockctl;
 -	struct device_node *np_clock;
 -	struct clk *sys_clk, *ref_clk;
 -	int plen, clockidx, clocksrc = -1;
 -	u32 sys_freq, val, clockdiv = 1, freq = 0;
 -	const u32 *pval;
 -
 -	np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
 -	if (!np_clock) {
 -		dev_err(&ofdev->dev, "couldn't find clock node\n");
 -		return 0;
 -	}
 -	clockctl = of_iomap(np_clock, 0);
 -	if (!clockctl) {
 -		dev_err(&ofdev->dev, "couldn't map clock registers\n");
 -		goto exit_put;
 -	}
 +	struct device_node *np;
 +	u32 clockdiv;
 +	enum {
 +		CLK_FROM_AUTO,
 +		CLK_FROM_IPS,
 +		CLK_FROM_SYS,
 +		CLK_FROM_REF,
 +	} clk_from;
 +	struct clk *clk_in, *clk_can;
 +	unsigned long freq_calc;
 +	struct mscan_priv *priv;
 +	struct clk *clk_ipg;
  
 -	/* Determine the MSCAN device index from the peripheral's
 -	 * physical address. Register address offsets against the
 -	 * IMMR base are:  0x1300, 0x1380, 0x2300, 0x2380
 +	/* the caller passed in the clock source spec that was read from
 +	 * the device tree, get the optional clock divider as well
  	 */
 -	pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
 -	BUG_ON(!pval || plen < sizeof(*pval));
 -	clockidx = (*pval & 0x80) ? 1 : 0;
 -	if (*pval & 0x2000)
 -		clockidx += 2;
 +	np = ofdev->dev.of_node;
 +	clockdiv = 1;
 +	of_property_read_u32(np, "fsl,mscan-clock-divider", &clockdiv);
 +	dev_dbg(&ofdev->dev, "device tree specs: clk src[%s] div[%d]\n",
 +		clock_source ? clock_source : "<NULL>", clockdiv);
 +
 +	/* when clock-source is 'ip', the CANCTL1[CLKSRC] bit needs to
 +	 * get set, and the 'ips' clock is the input to the MSCAN
 +	 * component
 +	 *
 +	 * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC]
 +	 * bit needs to get cleared, an optional clock-divider may have
 +	 * been specified (the default value is 1), the appropriate
 +	 * MSCAN related MCLK is the input to the MSCAN component
 +	 *
 +	 * in the absence of a clock-source spec, first an optimal clock
 +	 * gets determined based on the 'sys' clock, if that fails the
 +	 * 'ref' clock is used
 +	 */
 +	clk_from = CLK_FROM_AUTO;
 +	if (clock_source) {
 +		/* interpret the device tree's spec for the clock source */
 +		if (!strcmp(clock_source, "ip"))
 +			clk_from = CLK_FROM_IPS;
 +		else if (!strcmp(clock_source, "sys"))
 +			clk_from = CLK_FROM_SYS;
 +		else if (!strcmp(clock_source, "ref"))
 +			clk_from = CLK_FROM_REF;
 +		else
 +			goto err_invalid;
 +		dev_dbg(&ofdev->dev, "got a clk source spec[%d]\n", clk_from);
 +	}
 +	if (clk_from == CLK_FROM_AUTO) {
 +		/* no spec so far, try the 'sys' clock; round to the
 +		 * next MHz and see if we can get a multiple of 16MHz
 +		 */
 +		dev_dbg(&ofdev->dev, "no clk source spec, trying SYS\n");
 +		clk_in = devm_clk_get(&ofdev->dev, "sys");
 +		if (IS_ERR(clk_in))
 +			goto err_notavail;
 +		freq_calc = clk_get_rate(clk_in);
 +		freq_calc +=  499999;
 +		freq_calc /= 1000000;
 +		freq_calc *= 1000000;
 +		if ((freq_calc % 16000000) == 0) {
 +			clk_from = CLK_FROM_SYS;
 +			clockdiv = freq_calc / 16000000;
 +			dev_dbg(&ofdev->dev,
 +				"clk fit, sys[%lu] div[%d] freq[%lu]\n",
 +				freq_calc, clockdiv, freq_calc / clockdiv);
 +		}
 +	}
 +	if (clk_from == CLK_FROM_AUTO) {
 +		/* no spec so far, use the 'ref' clock */
 +		dev_dbg(&ofdev->dev, "no clk source spec, trying REF\n");
 +		clk_in = devm_clk_get(&ofdev->dev, "ref");
 +		if (IS_ERR(clk_in))
 +			goto err_notavail;
 +		clk_from = CLK_FROM_REF;
 +		freq_calc = clk_get_rate(clk_in);
 +		dev_dbg(&ofdev->dev,
 +			"clk fit, ref[%lu] (no div) freq[%lu]\n",
 +			freq_calc, freq_calc);
 +	}
  
 -	/*
 -	 * Clock source and divider selection: 3 different clock sources
 -	 * can be selected: "ip", "ref" or "sys". For the latter two, a
 -	 * clock divider can be defined as well. If the clock source is
 -	 * not specified by the device tree, we first try to find an
 -	 * optimal CAN source clock based on the system clock. If that
 -	 * is not posslible, the reference clock will be used.
 +	/* select IPS or MCLK as the MSCAN input (returned to the caller),
 +	 * setup the MCLK mux source and rate if applicable, apply the
 +	 * optionally specified or derived above divider, and determine
 +	 * the actual resulting clock rate to return to the caller
  	 */
 -	if (clock_name && !strcmp(clock_name, "ip")) {
 +	switch (clk_from) {
 +	case CLK_FROM_IPS:
 +		clk_can = devm_clk_get(&ofdev->dev, "ips");
 +		if (IS_ERR(clk_can))
 +			goto err_notavail;
 +		priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
 +		priv->clk_can = clk_can;
 +		freq_calc = clk_get_rate(clk_can);
  		*mscan_clksrc = MSCAN_CLKSRC_IPS;
 -		freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
 -	} else {
 +		dev_dbg(&ofdev->dev, "clk from IPS, clksrc[%d] freq[%lu]\n",
 +			*mscan_clksrc, freq_calc);
 +		break;
 +	case CLK_FROM_SYS:
 +	case CLK_FROM_REF:
 +		clk_can = devm_clk_get(&ofdev->dev, "mclk");
 +		if (IS_ERR(clk_can))
 +			goto err_notavail;
 +		priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
 +		priv->clk_can = clk_can;
 +		if (clk_from == CLK_FROM_SYS)
 +			clk_in = devm_clk_get(&ofdev->dev, "sys");
 +		if (clk_from == CLK_FROM_REF)
 +			clk_in = devm_clk_get(&ofdev->dev, "ref");
 +		if (IS_ERR(clk_in))
 +			goto err_notavail;
 +		clk_set_parent(clk_can, clk_in);
 +		freq_calc = clk_get_rate(clk_in);
 +		freq_calc /= clockdiv;
 +		clk_set_rate(clk_can, freq_calc);
 +		freq_calc = clk_get_rate(clk_can);
  		*mscan_clksrc = MSCAN_CLKSRC_BUS;
 -
 -		pval = of_get_property(ofdev->dev.of_node,
 -				       "fsl,mscan-clock-divider", &plen);
 -		if (pval && plen == sizeof(*pval))
 -			clockdiv = *pval;
 -		if (!clockdiv)
 -			clockdiv = 1;
 -
 -		if (!clock_name || !strcmp(clock_name, "sys")) {
 -			sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
 -			if (IS_ERR(sys_clk)) {
 -				dev_err(&ofdev->dev, "couldn't get sys_clk\n");
 -				goto exit_unmap;
 -			}
 -			/* Get and round up/down sys clock rate */
 -			sys_freq = 1000000 *
 -				((clk_get_rate(sys_clk) + 499999) / 1000000);
 -
 -			if (!clock_name) {
 -				/* A multiple of 16 MHz would be optimal */
 -				if ((sys_freq % 16000000) == 0) {
 -					clocksrc = 0;
 -					clockdiv = sys_freq / 16000000;
 -					freq = sys_freq / clockdiv;
 -				}
 -			} else {
 -				clocksrc = 0;
 -				freq = sys_freq / clockdiv;
 -			}
 -		}
 -
 -		if (clocksrc < 0) {
 -			ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
 -			if (IS_ERR(ref_clk)) {
 -				dev_err(&ofdev->dev, "couldn't get ref_clk\n");
 -				goto exit_unmap;
 -			}
 -			clocksrc = 1;
 -			freq = clk_get_rate(ref_clk) / clockdiv;
 -		}
 +		dev_dbg(&ofdev->dev, "clk from MCLK, clksrc[%d] freq[%lu]\n",
 +			*mscan_clksrc, freq_calc);
 +		break;
 +	default:
 +		goto err_invalid;
  	}
  
 -	/* Disable clock */
 -	out_be32(&clockctl->mccr[clockidx], 0x0);
 -	if (clocksrc >= 0) {
 -		/* Set source and divider */
 -		val = (clocksrc << 14) | ((clockdiv - 1) << 17);
 -		out_be32(&clockctl->mccr[clockidx], val);
 -		/* Enable clock */
 -		out_be32(&clockctl->mccr[clockidx], val | 0x10000);
 -	}
 +	/* the above clk_can item is used for the bitrate, access to
 +	 * the peripheral's register set needs the clk_ipg item
 +	 */
 +	clk_ipg = devm_clk_get(&ofdev->dev, "ipg");
 +	if (IS_ERR(clk_ipg))
 +		goto err_notavail_ipg;
 +	if (clk_prepare_enable(clk_ipg))
 +		goto err_notavail_ipg;
 +	priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
 +	priv->clk_ipg = clk_ipg;
 +
 +	/* return the determined clock source rate */
 +	return freq_calc;
 +
 +err_invalid:
 +	dev_err(&ofdev->dev, "invalid clock source specification\n");
 +	/* clock source rate could not get determined */
 +	return 0;
  
 -	/* Enable MSCAN clock domain */
 -	val = in_be32(&clockctl->sccr[1]);
 -	if (!(val & (1 << 25)))
 -		out_be32(&clockctl->sccr[1], val | (1 << 25));
 +err_notavail:
 +	dev_err(&ofdev->dev, "cannot acquire or setup bitrate clock source\n");
 +	/* clock source rate could not get determined */
 +	return 0;
  
 -	dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
 -		*mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
 -		clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
 +err_notavail_ipg:
 +	dev_err(&ofdev->dev, "cannot acquire or setup register clock\n");
 +	/* clock source rate could not get determined */
 +	return 0;
 +}
  
 -exit_unmap:
 -	iounmap(clockctl);
 -exit_put:
 -	of_node_put(np_clock);
 -	return freq;
 +static void mpc512x_can_put_clock(struct platform_device *ofdev)
 +{
 +	struct mscan_priv *priv;
 +
 +	priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
 +	if (priv->clk_ipg)
 +		clk_disable_unprepare(priv->clk_ipg);
  }
  #else /* !CONFIG_PPC_MPC512x */
  static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
@@@ -280,7 -244,6 +279,7 @@@
  {
  	return 0;
  }
 +#define mpc512x_can_put_clock NULL
  #endif /* CONFIG_PPC_MPC512x */
  
  static const struct of_device_id mpc5xxx_can_table[];
@@@ -422,13 -385,11 +421,13 @@@ static int mpc5xxx_can_resume(struct pl
  static const struct mpc5xxx_can_data mpc5200_can_data = {
  	.type = MSCAN_TYPE_MPC5200,
  	.get_clock = mpc52xx_can_get_clock,
 +	/* .put_clock not applicable */
  };
  
  static const struct mpc5xxx_can_data mpc5121_can_data = {
  	.type = MSCAN_TYPE_MPC5121,
  	.get_clock = mpc512x_can_get_clock,
 +	.put_clock = mpc512x_can_put_clock,
  };
  
  static const struct of_device_id mpc5xxx_can_table[] = {
diff --combined drivers/net/ethernet/8390/hydra.c
index f615fde,d8b86c8..0fe19d6
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@@ -66,6 -66,7 +66,7 @@@ static void hydra_block_input(struct ne
  static void hydra_block_output(struct net_device *dev, int count,
  			       const unsigned char *buf, int start_page);
  static void hydra_remove_one(struct zorro_dev *z);
+ static u32 hydra_msg_enable;
  
  static struct zorro_device_id hydra_zorro_tbl[] = {
      { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@@ -113,12 -114,13 +114,13 @@@ static const struct net_device_ops hydr
  static int hydra_init(struct zorro_dev *z)
  {
      struct net_device *dev;
 -    unsigned long board = ZTWO_VADDR(z->resource.start);
 +    unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start);
      unsigned long ioaddr = board+HYDRA_NIC_BASE;
      const char name[] = "NE2000";
      int start_page, stop_page;
      int j;
      int err;
+     struct ei_device *ei_local;
  
      static u32 hydra_offsets[16] = {
  	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@@ -137,6 -139,8 +139,8 @@@
      start_page = NESM_START_PG;
      stop_page = NESM_STOP_PG;
  
+     ei_local = netdev_priv(dev);
+     ei_local->msg_enable = hydra_msg_enable;
      dev->base_addr = ioaddr;
      dev->irq = IRQ_AMIGA_PORTS;
  
@@@ -187,15 -191,16 +191,16 @@@ static int hydra_open(struct net_devic
  
  static int hydra_close(struct net_device *dev)
  {
-     if (ei_debug > 1)
- 	printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+     struct ei_device *ei_local = netdev_priv(dev);
+ 
+     netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
      __ei_close(dev);
      return 0;
  }
  
  static void hydra_reset_8390(struct net_device *dev)
  {
-     printk(KERN_INFO "Hydra hw reset not there\n");
+     netdev_info(dev, "Hydra hw reset not there\n");
  }
  
  static void hydra_get_8390_hdr(struct net_device *dev,
diff --combined drivers/net/ethernet/8390/zorro8390.c
index ae2a12b,7b373e65..8308728
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@@ -44,6 -44,8 +44,8 @@@
  static const char version[] =
  	"8390.c:v1.10cvs 9/23/94 Donald Becker (becker at cesdis.gsfc.nasa.gov)\n";
  
+ static u32 zorro8390_msg_enable;
+ 
  #include "lib8390.c"
  
  #define DRV_NAME	"zorro8390"
@@@ -86,9 -88,9 +88,9 @@@ static struct card_info 
  static void zorro8390_reset_8390(struct net_device *dev)
  {
  	unsigned long reset_start_time = jiffies;
+ 	struct ei_device *ei_local = netdev_priv(dev);
  
- 	if (ei_debug > 1)
- 		netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+ 	netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
  
  	z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
  
@@@ -119,8 -121,9 +121,9 @@@ static void zorro8390_get_8390_hdr(stru
  	 * If it does, it's the last thing you'll see
  	 */
  	if (ei_status.dmaing) {
- 		netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
- 			   __func__, ei_status.dmaing, ei_status.irqlock);
+ 		netdev_warn(dev,
+ 			    "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ 			    __func__, ei_status.dmaing, ei_status.irqlock);
  		return;
  	}
  
@@@ -230,7 -233,7 +233,7 @@@ static void zorro8390_block_output(stru
  	while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
  		if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
  					/* 20ms */
- 			netdev_err(dev, "timeout waiting for Tx RDC\n");
+ 			netdev_warn(dev, "timeout waiting for Tx RDC\n");
  			zorro8390_reset_8390(dev);
  			__NS8390_init(dev, 1);
  			break;
@@@ -248,8 -251,9 +251,9 @@@ static int zorro8390_open(struct net_de
  
  static int zorro8390_close(struct net_device *dev)
  {
- 	if (ei_debug > 1)
- 		netdev_dbg(dev, "Shutting down ethercard\n");
+ 	struct ei_device *ei_local = netdev_priv(dev);
+ 
+ 	netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
  	__ei_close(dev);
  	return 0;
  }
@@@ -287,12 -291,13 +291,13 @@@ static const struct net_device_ops zorr
  };
  
  static int zorro8390_init(struct net_device *dev, unsigned long board,
 -			  const char *name, unsigned long ioaddr)
 +			  const char *name, void __iomem *ioaddr)
  {
  	int i;
  	int err;
  	unsigned char SA_prom[32];
  	int start_page, stop_page;
+ 	struct ei_device *ei_local = netdev_priv(dev);
  	static u32 zorro8390_offsets[16] = {
  		0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
  		0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@@ -354,7 -359,7 +359,7 @@@
  	start_page = NESM_START_PG;
  	stop_page = NESM_STOP_PG;
  
 -	dev->base_addr = ioaddr;
 +	dev->base_addr = (unsigned long)ioaddr;
  	dev->irq = IRQ_AMIGA_PORTS;
  
  	/* Install the Interrupt handler */
@@@ -383,6 -388,9 +388,9 @@@
  
  	dev->netdev_ops = &zorro8390_netdev_ops;
  	__NS8390_init(dev, 0);
+ 
+ 	ei_local->msg_enable = zorro8390_msg_enable;
+ 
  	err = register_netdev(dev);
  	if (err) {
  		free_irq(IRQ_AMIGA_PORTS, dev);
diff --combined drivers/net/ethernet/arc/emac_main.c
index 248baf6,eedf2a5..eeecc29
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@@ -381,17 -381,7 +381,7 @@@ static int arc_emac_open(struct net_dev
  	phy_dev->autoneg = AUTONEG_ENABLE;
  	phy_dev->speed = 0;
  	phy_dev->duplex = 0;
- 	phy_dev->advertising = phy_dev->supported;
- 
- 	if (priv->max_speed > 100) {
- 		phy_dev->advertising &= PHY_GBIT_FEATURES;
- 	} else if (priv->max_speed <= 100) {
- 		phy_dev->advertising &= PHY_BASIC_FEATURES;
- 		if (priv->max_speed <= 10) {
- 			phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
- 			phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
- 		}
- 	}
+ 	phy_dev->advertising &= phy_dev->supported;
  
  	priv->last_rx_bd = 0;
  
@@@ -565,8 -555,6 +555,8 @@@ static int arc_emac_tx(struct sk_buff *
  	/* Make sure pointer to data buffer is set */
  	wmb();
  
 +	skb_tx_timestamp(skb);
 +
  	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
  
  	/* Increment index to point to the next BD */
@@@ -581,6 -569,8 +571,6 @@@
  
  	arc_reg_set(priv, R_STATUS, TXPL_MASK);
  
 -	skb_tx_timestamp(skb);
 -
  	return NETDEV_TX_OK;
  }
  
@@@ -704,14 -694,6 +694,6 @@@ static int arc_emac_probe(struct platfo
  	/* Set poll rate so that it polls every 1 ms */
  	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
  
- 	/* Get max speed of operation from device tree */
- 	if (of_property_read_u32(pdev->dev.of_node, "max-speed",
- 				 &priv->max_speed)) {
- 		dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
- 		err = -EINVAL;
- 		goto out;
- 	}
- 
  	ndev->irq = irq;
  	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 14ffb6e,08f8047..2beb543
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@@ -5932,6 -5932,7 +5932,7 @@@
  #define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
  #define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)
  #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
+ #define MISC_REGISTERS_RESET_REG_1_RST_XSEM			 (0x1<<22)
  #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
  #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
  #define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
@@@ -7179,7 -7180,6 +7180,7 @@@ Theotherbitsarereservedandshouldbezero*
  #define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
  #define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
  #define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
 +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa
  #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
  #define MDIO_WC_REG_XGXS_STATUS3			0x8129
  #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3dc2537,e5f7985..a20fd34
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@@ -208,7 -208,7 +208,7 @@@ static int bnx2x_get_vf_id(struct bnx2
  		return -EINVAL;
  	}
  
 -	BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
 +	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
  
  	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
  
@@@ -800,14 -800,18 +800,18 @@@ int bnx2x_vfpf_config_rss(struct bnx2x 
  	}
  
  	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- 		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
- 			  resp->hdr.status);
- 		rc = -EINVAL;
+ 		/* Since older drivers don't support this feature (and VF has
+ 		 * no way of knowing other than failing this), don't propagate
+ 		 * an error in this case.
+ 		 */
+ 		DP(BNX2X_MSG_IOV,
+ 		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ 		   resp->hdr.status);
  	}
  out:
  	bnx2x_vfpf_finalize(bp, &req->first_tlv);
  
- 	return 0;
+ 	return rc;
  }
  
  int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@@ -1416,6 -1420,14 +1420,14 @@@ static void bnx2x_vf_mbx_setup_q(struc
  				setup_q->rxq.cache_line_log;
  			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
  
+ 			/* rx setup - multicast engine */
+ 			if (bnx2x_vfq_is_leading(q)) {
+ 				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+ 
+ 				rxq_params->mcast_engine_id = mcast_id;
+ 				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ 			}
+ 
  			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
  						 q->index, q->sb_idx);
  		}
@@@ -1702,7 -1714,7 +1714,7 @@@ static void bnx2x_vf_mbx_set_q_filters(
  
  		/* ...and only the mac set by the ndo */
  		if (filters->n_mac_vlan_filters == 1 &&
- 		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ 		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
  			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
  				  vf->abs_vfid);
  
diff --combined drivers/net/ethernet/broadcom/tg3.c
index 15a66e4,d88ef55..c37e9f2
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/mii.h>
  #include <linux/phy.h>
  #include <linux/brcmphy.h>
+ #include <linux/if.h>
  #include <linux/if_vlan.h>
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@@ -94,10 -95,10 +95,10 @@@ static inline void _tg3_flag_clear(enu
  
  #define DRV_MODULE_NAME		"tg3"
  #define TG3_MAJ_NUM			3
- #define TG3_MIN_NUM			134
+ #define TG3_MIN_NUM			136
  #define DRV_MODULE_VERSION	\
  	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
- #define DRV_MODULE_RELDATE	"Sep 16, 2013"
+ #define DRV_MODULE_RELDATE	"Jan 03, 2014"
  
  #define RESET_KIND_SHUTDOWN	0
  #define RESET_KIND_INIT		1
@@@ -208,6 -209,9 +209,9 @@@
  
  #define TG3_RAW_IP_ALIGN 2
  
+ #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+ #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+ 
  #define TG3_FW_UPDATE_TIMEOUT_SEC	5
  #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
  
@@@ -3948,32 -3952,41 +3952,41 @@@ static int tg3_load_tso_firmware(struc
  	return 0;
  }
  
+ /* tp->lock is held. */
+ static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+ {
+ 	u32 addr_high, addr_low;
+ 
+ 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+ 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ 		    (mac_addr[4] <<  8) | mac_addr[5]);
+ 
+ 	if (index < 4) {
+ 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+ 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+ 	} else {
+ 		index -= 4;
+ 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+ 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+ 	}
+ }
  
  /* tp->lock is held. */
  static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
  {
- 	u32 addr_high, addr_low;
+ 	u32 addr_high;
  	int i;
  
  	for (i = 0; i < 4; i++) {
  		if (i == 1 && skip_mac_1)
  			continue;
- 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
  	}
  
  	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  	    tg3_asic_rev(tp) == ASIC_REV_5704) {
- 		for (i = 0; i < 12; i++) {
- 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- 		}
+ 		for (i = 4; i < 16; i++)
+ 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
  	}
  
  	addr_high = (tp->dev->dev_addr[0] +
@@@ -4403,9 -4416,12 +4416,12 @@@ static void tg3_phy_copper_begin(struc
  			if (tg3_flag(tp, WOL_SPEED_100MB))
  				adv |= ADVERTISED_100baseT_Half |
  				       ADVERTISED_100baseT_Full;
- 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
- 				adv |= ADVERTISED_1000baseT_Half |
- 				       ADVERTISED_1000baseT_Full;
+ 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+ 				if (!(tp->phy_flags &
+ 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ 					adv |= ADVERTISED_1000baseT_Half;
+ 				adv |= ADVERTISED_1000baseT_Full;
+ 			}
  
  			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
  		} else {
@@@ -7622,7 -7638,7 +7638,7 @@@ static inline int tg3_4g_overflow_test(
  {
  	u32 base = (u32) mapping & 0xffffffff;
  
 -	return (base > 0xffffdcc0) && (base + len + 8 < base);
 +	return base + len + 8 < base;
  }
  
  /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@@ -8925,6 -8941,49 +8941,49 @@@ static void tg3_restore_pci_state(struc
  	}
  }
  
+ static void tg3_override_clk(struct tg3 *tp)
+ {
+ 	u32 val;
+ 
+ 	switch (tg3_asic_rev(tp)) {
+ 	case ASIC_REV_5717:
+ 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ 		     TG3_CPMU_MAC_ORIDE_ENABLE);
+ 		break;
+ 
+ 	case ASIC_REV_5719:
+ 	case ASIC_REV_5720:
+ 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ 		break;
+ 
+ 	default:
+ 		return;
+ 	}
+ }
+ 
+ static void tg3_restore_clk(struct tg3 *tp)
+ {
+ 	u32 val;
+ 
+ 	switch (tg3_asic_rev(tp)) {
+ 	case ASIC_REV_5717:
+ 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+ 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+ 		break;
+ 
+ 	case ASIC_REV_5719:
+ 	case ASIC_REV_5720:
+ 		val = tr32(TG3_CPMU_CLCK_ORIDE);
+ 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ 		break;
+ 
+ 	default:
+ 		return;
+ 	}
+ }
+ 
  /* tp->lock is held. */
  static int tg3_chip_reset(struct tg3 *tp)
  {
@@@ -9013,6 -9072,13 +9072,13 @@@
  		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
  	}
  
+ 	/* Set the clock to the highest frequency to avoid timeouts. With link
+ 	 * aware mode, the clock speed could be slow and bootcode does not
+ 	 * complete within the expected time. Override the clock to allow the
+ 	 * bootcode to finish sooner and then restore it.
+ 	 */
+ 	tg3_override_clk(tp);
+ 
  	/* Manage gphy power for all CPMU absent PCIe devices. */
  	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
  		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@@ -9151,10 -9217,7 +9217,7 @@@
  		tw32(0x7c00, val | (1 << 25));
  	}
  
- 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
- 		val = tr32(TG3_CPMU_CLCK_ORIDE);
- 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
- 	}
+ 	tg3_restore_clk(tp);
  
  	/* Reprobe ASF enable state.  */
  	tg3_flag_clear(tp, ENABLE_ASF);
@@@ -9186,6 -9249,7 +9249,7 @@@
  
  static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
  static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+ static void __tg3_set_rx_mode(struct net_device *);
  
  /* tp->lock is held. */
  static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@@ -9246,6 -9310,7 +9310,7 @@@ static int tg3_set_mac_addr(struct net_
  	}
  	spin_lock_bh(&tp->lock);
  	__tg3_set_mac_addr(tp, skip_mac_1);
+ 	__tg3_set_rx_mode(dev);
  	spin_unlock_bh(&tp->lock);
  
  	return err;
@@@ -9634,6 -9699,20 +9699,20 @@@ static void __tg3_set_rx_mode(struct ne
  		tw32(MAC_HASH_REG_3, mc_filter[3]);
  	}
  
+ 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+ 		rx_mode |= RX_MODE_PROMISC;
+ 	} else if (!(dev->flags & IFF_PROMISC)) {
+ 		/* Add all entries into to the mac addr filter list */
+ 		int i = 0;
+ 		struct netdev_hw_addr *ha;
+ 
+ 		netdev_for_each_uc_addr(ha, dev) {
+ 			__tg3_set_one_mac_addr(tp, ha->addr,
+ 					       i + TG3_UCAST_ADDR_IDX(tp));
+ 			i++;
+ 		}
+ 	}
+ 
  	if (rx_mode != tp->rx_mode) {
  		tp->rx_mode = rx_mode;
  		tw32_f(MAC_RX_MODE, rx_mode);
@@@ -9966,6 -10045,7 +10045,7 @@@ static int tg3_reset_hw(struct tg3 *tp
  	if (tg3_asic_rev(tp) == ASIC_REV_5719)
  		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
  	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
  		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@@ -10751,6 -10831,7 +10831,7 @@@ static void tg3_periodic_fetch_stats(st
  
  	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
  	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
  		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@@ -10879,6 -10960,13 +10960,13 @@@ static void tg3_timer(unsigned long __o
  		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
  			   tg3_flag(tp, 5780_CLASS)) {
  			tg3_serdes_parallel_detect(tp);
+ 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+ 			u32 cpmu = tr32(TG3_CPMU_STATUS);
+ 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+ 					 TG3_CPMU_STATUS_LINK_MASK);
+ 
+ 			if (link_up != tp->link_up)
+ 				tg3_setup_phy(tp, false);
  		}
  
  		tp->timer_counter = tp->timer_multiplier;
@@@ -11746,8 -11834,6 +11834,6 @@@ static void tg3_get_nstats(struct tg3 *
  		get_stat64(&hw_stats->rx_frame_too_long_errors) +
  		get_stat64(&hw_stats->rx_undersize_packets);
  
- 	stats->rx_over_errors = old_stats->rx_over_errors +
- 		get_stat64(&hw_stats->rxbds_empty);
  	stats->rx_frame_errors = old_stats->rx_frame_errors +
  		get_stat64(&hw_stats->rx_align_errors);
  	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@@ -13594,14 -13680,13 +13680,13 @@@ static void tg3_self_test(struct net_de
  
  }
  
- static int tg3_hwtstamp_ioctl(struct net_device *dev,
- 			      struct ifreq *ifr, int cmd)
+ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
  {
  	struct tg3 *tp = netdev_priv(dev);
  	struct hwtstamp_config stmpconf;
  
  	if (!tg3_flag(tp, PTP_CAPABLE))
- 		return -EINVAL;
+ 		return -EOPNOTSUPP;
  
  	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
  		return -EFAULT;
@@@ -13682,6 -13767,67 +13767,67 @@@
  		-EFAULT : 0;
  }
  
+ static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ {
+ 	struct tg3 *tp = netdev_priv(dev);
+ 	struct hwtstamp_config stmpconf;
+ 
+ 	if (!tg3_flag(tp, PTP_CAPABLE))
+ 		return -EOPNOTSUPP;
+ 
+ 	stmpconf.flags = 0;
+ 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+ 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ 
+ 	switch (tp->rxptpctl) {
+ 	case 0:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ 		break;
+ 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return -ERANGE;
+ 	}
+ 
+ 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ 		-EFAULT : 0;
+ }
+ 
  static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  {
  	struct mii_ioctl_data *data = if_mii(ifr);
@@@ -13735,7 -13881,10 +13881,10 @@@
  		return err;
  
  	case SIOCSHWTSTAMP:
- 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+ 		return tg3_hwtstamp_set(dev, ifr);
+ 
+ 	case SIOCGHWTSTAMP:
+ 		return tg3_hwtstamp_get(dev, ifr);
  
  	default:
  		/* do nothing */
@@@ -14856,7 -15005,8 +15005,8 @@@ static void tg3_get_eeprom_hw_cfg(struc
  	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  		u32 nic_cfg, led_cfg;
- 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+ 		u32 nic_phy_id, ver, eeprom_phy_id;
  		int eeprom_phy_serdes = 0;
  
  		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@@ -14873,6 -15023,11 +15023,11 @@@
  		if (tg3_asic_rev(tp) == ASIC_REV_5785)
  			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
  
+ 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ 		    tg3_asic_rev(tp) == ASIC_REV_5720)
+ 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+ 
  		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
  		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
  			eeprom_phy_serdes = 1;
@@@ -15025,6 -15180,9 +15180,9 @@@
  			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
  		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
  			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+ 
+ 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+ 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
  	}
  done:
  	if (tg3_flag(tp, WOL_CAP))
@@@ -15120,9 -15278,11 +15278,11 @@@ static void tg3_phy_init_link_config(st
  {
  	u32 adv = ADVERTISED_Autoneg;
  
- 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- 		adv |= ADVERTISED_1000baseT_Half |
- 		       ADVERTISED_1000baseT_Full;
+ 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ 			adv |= ADVERTISED_1000baseT_Half;
+ 		adv |= ADVERTISED_1000baseT_Full;
+ 	}
  
  	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  		adv |= ADVERTISED_100baseT_Half |
@@@ -16470,6 -16630,7 +16630,7 @@@ static int tg3_get_invariants(struct tg
  
  	/* Set these bits to enable statistics workaround. */
  	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
  		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@@ -16612,6 -16773,9 +16773,9 @@@
  	else
  		tg3_flag_clear(tp, POLL_SERDES);
  
+ 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+ 		tg3_flag_set(tp, POLL_CPMU_LINK);
+ 
  	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
  	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
  	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@@ -17533,6 -17697,7 +17697,7 @@@ static int tg3_init_one(struct pci_dev 
  		features |= NETIF_F_LOOPBACK;
  
  	dev->hw_features |= features;
+ 	dev->priv_flags |= IFF_UNICAST_FLT;
  
  	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
  	    !tg3_flag(tp, TSO_CAPABLE) &&
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 56e0415,17fe50b..b97e35c
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@@ -228,25 -228,6 +228,25 @@@ struct tp_params 
  
  	uint32_t dack_re;            /* DACK timer resolution */
  	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
 +
 +	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
 +	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
 +
 +	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
 +	 * subset of the set of fields which may be present in the Compressed
 +	 * Filter Tuple portion of filters and TCP TCB connections.  The
 +	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
 +	 * Since a variable number of fields may or may not be present, their
 +	 * shifted field positions within the Compressed Filter Tuple may
 +	 * vary, or not even be present if the field isn't selected in
 +	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
 +	 * places we store their offsets here, or a -1 if the field isn't
 +	 * present.
 +	 */
 +	int vlan_shift;
 +	int vnic_shift;
 +	int port_shift;
 +	int protocol_shift;
  };
  
  struct vpd_params {
@@@ -938,15 -919,12 +938,14 @@@ int t4_seeprom_wp(struct adapter *adapt
  int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
  int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
  unsigned int t4_flash_cfg_addr(struct adapter *adapter);
- int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
  int t4_get_fw_version(struct adapter *adapter, u32 *vers);
  int t4_get_tp_version(struct adapter *adapter, u32 *vers);
  int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
  	       const u8 *fw_data, unsigned int fw_size,
  	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
  int t4_prep_adapter(struct adapter *adapter);
 +int t4_init_tp_params(struct adapter *adap);
 +int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
  int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
  void t4_fatal_err(struct adapter *adapter);
  int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@@ -979,13 -957,6 +978,6 @@@ int t4_fw_hello(struct adapter *adap, u
  int t4_fw_bye(struct adapter *adap, unsigned int mbox);
  int t4_early_init(struct adapter *adap, unsigned int mbox);
  int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
- int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
- int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
- int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- 		  const u8 *fw_data, unsigned int size, int force);
- int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- 		      unsigned int mtype, unsigned int maddr,
- 		      u32 *finiver, u32 *finicsum, u32 *cfcsum);
  int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
  			  unsigned int cache_line_size);
  int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --combined drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc3511a,4274543..47ffa64
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@@ -1630,7 -1630,8 +1630,8 @@@ static void do_gro(struct sge_eth_rxq *
  	skb->ip_summed = CHECKSUM_UNNECESSARY;
  	skb_record_rx_queue(skb, rxq->rspq.idx);
  	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ 			     PKT_HASH_TYPE_L3);
  
  	if (unlikely(pkt->vlan_ex)) {
  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@@ -1686,7 -1687,8 +1687,8 @@@ int t4_ethrx_handler(struct sge_rspq *q
  	skb->protocol = eth_type_trans(skb, q->netdev);
  	skb_record_rx_queue(skb, q->idx);
  	if (skb->dev->features & NETIF_F_RXHASH)
- 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ 			     PKT_HASH_TYPE_L3);
  
  	rxq->stats.pkts++;
  
@@@ -2581,7 -2583,7 +2583,7 @@@ static int t4_sge_init_soft(struct adap
  	#undef READ_FL_BUF
  
  	if (fl_small_pg != PAGE_SIZE ||
 -	    (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
 +	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
  				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
  		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
  			fl_small_pg, fl_large_pg);
diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e1413ea,9903a66..a396475
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@@ -38,6 -38,8 +38,8 @@@
  #include "t4_regs.h"
  #include "t4fw_api.h"
  
+ static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ 			 const u8 *fw_data, unsigned int size, int force);
  /**
   *	t4_wait_op_done_val - wait until an operation is completed
   *	@adapter: the adapter performing the operation
@@@ -1070,62 -1072,6 +1072,6 @@@ unsigned int t4_flash_cfg_addr(struct a
  }
  
  /**
-  *	t4_load_cfg - download config file
-  *	@adap: the adapter
-  *	@cfg_data: the cfg text file to write
-  *	@size: text file size
-  *
-  *	Write the supplied config text file to the card's serial flash.
-  */
- int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
- {
- 	int ret, i, n;
- 	unsigned int addr;
- 	unsigned int flash_cfg_start_sec;
- 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- 
- 	addr = t4_flash_cfg_addr(adap);
- 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
- 
- 	if (size > FLASH_CFG_MAX_SIZE) {
- 		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
- 			FLASH_CFG_MAX_SIZE);
- 		return -EFBIG;
- 	}
- 
- 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
- 			 sf_sec_size);
- 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
- 				     flash_cfg_start_sec + i - 1);
- 	/*
- 	 * If size == 0 then we're simply erasing the FLASH sectors associated
- 	 * with the on-adapter Firmware Configuration File.
- 	 */
- 	if (ret || size == 0)
- 		goto out;
- 
- 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
- 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
- 		if ((size - i) <  SF_PAGE_SIZE)
- 			n = size - i;
- 		else
- 			n = SF_PAGE_SIZE;
- 		ret = t4_write_flash(adap, addr, n, cfg_data);
- 		if (ret)
- 			goto out;
- 
- 		addr += SF_PAGE_SIZE;
- 		cfg_data += SF_PAGE_SIZE;
- 	}
- 
- out:
- 	if (ret)
- 		dev_err(adap->pdev_dev, "config file %s failed %d\n",
- 			(size == 0 ? "clear" : "download"), ret);
- 	return ret;
- }
- 
- /**
   *	t4_load_fw - download firmware
   *	@adap: the adapter
   *	@fw_data: the firmware image to write
@@@ -2810,7 -2756,7 +2756,7 @@@ int t4_fw_reset(struct adapter *adap, u
   *	be doing.  The only way out of this state is to RESTART the firmware
   *	...
   */
- int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
  {
  	int ret = 0;
  
@@@ -2875,7 -2821,7 +2821,7 @@@
   *	    the chip since older firmware won't recognize the PCIE_FW.HALT
   *	    flag and automatically RESET itself on startup.
   */
- int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
  {
  	if (reset) {
  		/*
@@@ -2938,8 -2884,8 +2884,8 @@@
   *	positive errno indicates that the adapter is ~probably~ intact, a
   *	negative errno indicates that things are looking bad ...
   */
- int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- 		  const u8 *fw_data, unsigned int size, int force)
+ static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ 			 const u8 *fw_data, unsigned int size, int force)
  {
  	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
  	int reset, ret;
@@@ -2964,78 -2910,6 +2910,6 @@@
  	return t4_fw_restart(adap, mbox, reset);
  }
  
- 
- /**
-  *	t4_fw_config_file - setup an adapter via a Configuration File
-  *	@adap: the adapter
-  *	@mbox: mailbox to use for the FW command
-  *	@mtype: the memory type where the Configuration File is located
-  *	@maddr: the memory address where the Configuration File is located
-  *	@finiver: return value for CF [fini] version
-  *	@finicsum: return value for CF [fini] checksum
-  *	@cfcsum: return value for CF computed checksum
-  *
-  *	Issue a command to get the firmware to process the Configuration
-  *	File located at the specified mtype/maddress.  If the Configuration
-  *	File is processed successfully and return value pointers are
-  *	provided, the Configuration File "[fini] section version and
-  *	checksum values will be returned along with the computed checksum.
-  *	It's up to the caller to decide how it wants to respond to the
-  *	checksums not matching but it recommended that a prominant warning
-  *	be emitted in order to help people rapidly identify changed or
-  *	corrupted Configuration Files.
-  *
-  *	Also note that it's possible to modify things like "niccaps",
-  *	"toecaps",etc. between processing the Configuration File and telling
-  *	the firmware to use the new configuration.  Callers which want to
-  *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
-  *	Configuration Files if they want to do this.
-  */
- int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- 		      unsigned int mtype, unsigned int maddr,
- 		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
- {
- 	struct fw_caps_config_cmd caps_cmd;
- 	int ret;
- 
- 	/*
- 	 * Tell the firmware to process the indicated Configuration File.
- 	 * If there are no errors and the caller has provided return value
- 	 * pointers for the [fini] section version, checksum and computed
- 	 * checksum, pass those back to the caller.
- 	 */
- 	memset(&caps_cmd, 0, sizeof(caps_cmd));
- 	caps_cmd.op_to_write =
- 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- 		      FW_CMD_REQUEST |
- 		      FW_CMD_READ);
- 	caps_cmd.cfvalid_to_len16 =
- 		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
- 		      FW_LEN16(caps_cmd));
- 	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
- 	if (ret < 0)
- 		return ret;
- 
- 	if (finiver)
- 		*finiver = ntohl(caps_cmd.finiver);
- 	if (finicsum)
- 		*finicsum = ntohl(caps_cmd.finicsum);
- 	if (cfcsum)
- 		*cfcsum = ntohl(caps_cmd.cfcsum);
- 
- 	/*
- 	 * And now tell the firmware to use the configuration we just loaded.
- 	 */
- 	caps_cmd.op_to_write =
- 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- 		      FW_CMD_REQUEST |
- 		      FW_CMD_WRITE);
- 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- 	return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
- }
- 
  /**
   *	t4_fixup_host_params - fix up host-dependent parameters
   *	@adap: the adapter
@@@ -3808,109 -3682,6 +3682,109 @@@ int t4_prep_adapter(struct adapter *ada
  	return 0;
  }
  
 +/**
 + *      t4_init_tp_params - initialize adap->params.tp
 + *      @adap: the adapter
 + *
 + *      Initialize various fields of the adapter's TP Parameters structure.
 + */
 +int t4_init_tp_params(struct adapter *adap)
 +{
 +	int chan;
 +	u32 v;
 +
 +	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
 +	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
 +	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
 +
 +	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
 +	for (chan = 0; chan < NCHAN; chan++)
 +		adap->params.tp.tx_modq[chan] = chan;
 +
 +	/* Cache the adapter's Compressed Filter Mode and global Incress
 +	 * Configuration.
 +	 */
 +	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
 +			 &adap->params.tp.vlan_pri_map, 1,
 +			 TP_VLAN_PRI_MAP);
 +	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
 +			 &adap->params.tp.ingress_config, 1,
 +			 TP_INGRESS_CONFIG);
 +
 +	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
 +	 * shift positions of several elements of the Compressed Filter Tuple
 +	 * for this adapter which we need frequently ...
 +	 */
 +	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
 +	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
 +	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
 +	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
 +							       F_PROTOCOL);
 +
 +	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
 +	 * represents the presense of an Outer VLAN instead of a VNIC ID.
 +	 */
 +	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
 +		adap->params.tp.vnic_shift = -1;
 +
 +	return 0;
 +}
 +
 +/**
 + *      t4_filter_field_shift - calculate filter field shift
 + *      @adap: the adapter
 + *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 + *
 + *      Return the shift position of a filter field within the Compressed
 + *      Filter Tuple.  The filter field is specified via its selection bit
 + *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
 + */
 +int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
 +{
 +	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
 +	unsigned int sel;
 +	int field_shift;
 +
 +	if ((filter_mode & filter_sel) == 0)
 +		return -1;
 +
 +	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
 +		switch (filter_mode & sel) {
 +		case F_FCOE:
 +			field_shift += W_FT_FCOE;
 +			break;
 +		case F_PORT:
 +			field_shift += W_FT_PORT;
 +			break;
 +		case F_VNIC_ID:
 +			field_shift += W_FT_VNIC_ID;
 +			break;
 +		case F_VLAN:
 +			field_shift += W_FT_VLAN;
 +			break;
 +		case F_TOS:
 +			field_shift += W_FT_TOS;
 +			break;
 +		case F_PROTOCOL:
 +			field_shift += W_FT_PROTOCOL;
 +			break;
 +		case F_ETHERTYPE:
 +			field_shift += W_FT_ETHERTYPE;
 +			break;
 +		case F_MACMATCH:
 +			field_shift += W_FT_MACMATCH;
 +			break;
 +		case F_MPSHITTYPE:
 +			field_shift += W_FT_MPSHITTYPE;
 +			break;
 +		case F_FRAGMENTATION:
 +			field_shift += W_FT_FRAGMENTATION;
 +			break;
 +		}
 +	}
 +	return field_shift;
 +}
 +
  int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
  {
  	u8 addr[6];
diff --combined drivers/net/ethernet/freescale/fec_main.c
index 45b8b22,05cd81a..4b9976a0
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@@ -428,8 -428,6 +428,8 @@@ fec_enet_start_xmit(struct sk_buff *skb
  	/* If this was the last BD in the ring, start at the beginning again. */
  	bdp = fec_enet_get_nextdesc(bdp, fep);
  
 +	skb_tx_timestamp(skb);
 +
  	fep->cur_tx = bdp;
  
  	if (fep->cur_tx == fep->dirty_tx)
@@@ -438,6 -436,8 +438,6 @@@
  	/* Trigger transmission start */
  	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  
 -	skb_tx_timestamp(skb);
 -
  	return NETDEV_TX_OK;
  }
  
@@@ -1679,8 -1679,12 +1679,12 @@@ static int fec_enet_ioctl(struct net_de
  	if (!phydev)
  		return -ENODEV;
  
- 	if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
- 		return fec_ptp_ioctl(ndev, rq, cmd);
+ 	if (fep->bufdesc_ex) {
+ 		if (cmd == SIOCSHWTSTAMP)
+ 			return fec_ptp_set(ndev, rq);
+ 		if (cmd == SIOCGHWTSTAMP)
+ 			return fec_ptp_get(ndev, rq);
+ 	}
  
  	return phy_mii_ioctl(phydev, rq, cmd);
  }
@@@ -2049,8 -2053,6 +2053,8 @@@ static void fec_reset_phy(struct platfo
  	int err, phy_reset;
  	int msec = 1;
  	struct device_node *np = pdev->dev.of_node;
 +	enum of_gpio_flags flags;
 +	bool port;
  
  	if (!np)
  		return;
@@@ -2060,22 -2062,18 +2064,22 @@@
  	if (msec > 1000)
  		msec = 1;
  
 -	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
 +	phy_reset = of_get_named_gpio_flags(np, "phy-reset-gpios", 0, &flags);
  	if (!gpio_is_valid(phy_reset))
  		return;
  
 -	err = devm_gpio_request_one(&pdev->dev, phy_reset,
 -				    GPIOF_OUT_INIT_LOW, "phy-reset");
 +	if (flags & OF_GPIO_ACTIVE_LOW)
 +		port = GPIOF_OUT_INIT_LOW;
 +	else
 +		port = GPIOF_OUT_INIT_HIGH;
 +
 +	err = devm_gpio_request_one(&pdev->dev, phy_reset, port, "phy-reset");
  	if (err) {
  		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
  		return;
  	}
  	msleep(msec);
 -	gpio_set_value(phy_reset, 1);
 +	gpio_set_value(phy_reset, !port);
  }
  #else /* CONFIG_OF */
  static void fec_reset_phy(struct platform_device *pdev)
diff --combined drivers/net/ethernet/ibm/ibmveth.c
index d04dbab,cde0fd9..4be9715
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@@ -12,8 -12,7 +12,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * Copyright (C) IBM Corporation, 2003, 2010
   *
@@@ -1276,21 -1275,18 +1275,21 @@@ static unsigned long ibmveth_get_desire
  {
  	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
  	struct ibmveth_adapter *adapter;
 +	struct iommu_table *tbl;
  	unsigned long ret;
  	int i;
  	int rxqentries = 1;
  
 +	tbl = get_iommu_table_base(&vdev->dev);
 +
  	/* netdev inits at probe time along with the structures we need below*/
  	if (netdev == NULL)
 -		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 +		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
  
  	adapter = netdev_priv(netdev);
  
  	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
 -	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
 +	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
  
  	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  		/* add the size of the active receive buffers */
@@@ -1298,12 -1294,11 +1297,12 @@@
  			ret +=
  			    adapter->rx_buff_pool[i].size *
  			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
 -			            buff_size);
 +					     buff_size, tbl);
  		rxqentries += adapter->rx_buff_pool[i].size;
  	}
  	/* add the size of the receive queue entries */
 -	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
 +	ret += IOMMU_PAGE_ALIGN(
 +		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
  
  	return ret;
  }
diff --combined drivers/net/ethernet/intel/e1000e/netdev.c
index c30d41d,051d158..d6570b2
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@@ -5790,7 -5790,7 +5790,7 @@@ static int e1000_mii_ioctl(struct net_d
   * specified. Matching the kind of event packet is not supported, with the
   * exception of "all V2 events regardless of level 2 or 4".
   **/
- static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
  {
  	struct e1000_adapter *adapter = netdev_priv(netdev);
  	struct hwtstamp_config config;
@@@ -5825,6 -5825,14 +5825,14 @@@
  			    sizeof(config)) ? -EFAULT : 0;
  }
  
+ static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+ {
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+ 
+ 	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ 			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+ }
+ 
  static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  {
  	switch (cmd) {
@@@ -5833,7 -5841,9 +5841,9 @@@
  	case SIOCSMIIREG:
  		return e1000_mii_ioctl(netdev, ifr, cmd);
  	case SIOCSHWTSTAMP:
- 		return e1000e_hwtstamp_ioctl(netdev, ifr);
+ 		return e1000e_hwtstamp_set(netdev, ifr);
+ 	case SIOCGHWTSTAMP:
+ 		return e1000e_hwtstamp_get(netdev, ifr);
  	default:
  		return -EOPNOTSUPP;
  	}
@@@ -6174,7 -6184,7 +6184,7 @@@ static int __e1000_resume(struct pci_de
  	return 0;
  }
  
 -#ifdef CONFIG_PM_SLEEP
 +#ifdef CONFIG_PM
  static int e1000_suspend(struct device *dev)
  {
  	struct pci_dev *pdev = to_pci_dev(dev);
@@@ -6193,7 -6203,7 +6203,7 @@@ static int e1000_resume(struct device *
  
  	return __e1000_resume(pdev);
  }
 -#endif /* CONFIG_PM_SLEEP */
 +#endif /* CONFIG_PM */
  
  #ifdef CONFIG_PM_RUNTIME
  static int e1000_runtime_suspend(struct device *dev)
diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 72084f7,9ce07f3..359f6e6
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@@ -291,9 -291,7 +291,9 @@@ static int ixgbe_pci_sriov_disable(stru
  {
  	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
  	int err;
 +#ifdef CONFIG_PCI_IOV
  	u32 current_flags = adapter->flags;
 +#endif
  
  	err = ixgbe_disable_sriov(adapter);
  
@@@ -717,8 -715,7 +717,7 @@@ static int ixgbe_set_vf_mac_addr(struc
  	}
  
  	if (adapter->vfinfo[vf].pf_set_mac &&
- 	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- 		   ETH_ALEN)) {
+ 	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
  		e_warn(drv,
  		       "VF %d attempted to override administratively set MAC address\n"
  		       "Reload the VF driver to resume operations\n",
diff --combined drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index cc68657,3010abb..3205861
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@@ -14,9 -14,7 +14,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-  * MA  02111-1307, USA.
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * The full GNU General Public License is included in this distribution
   * in the file called "COPYING".
@@@ -1604,13 -1602,13 +1602,13 @@@ netxen_process_lro(struct netxen_adapte
  	u32 seq_number;
  	u8 vhdr_len = 0;
  
 -	if (unlikely(ring > adapter->max_rds_rings))
 +	if (unlikely(ring >= adapter->max_rds_rings))
  		return NULL;
  
  	rds_ring = &recv_ctx->rds_rings[ring];
  
  	index = netxen_get_lro_sts_refhandle(sts_data0);
 -	if (unlikely(index > rds_ring->num_desc))
 +	if (unlikely(index >= rds_ring->num_desc))
  		return NULL;
  
  	buffer = &rds_ring->rx_buf_arr[index];
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ff80cd8,4afdef0c..35d4876
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@@ -38,8 -38,8 +38,8 @@@
  
  #define _QLCNIC_LINUX_MAJOR 5
  #define _QLCNIC_LINUX_MINOR 3
- #define _QLCNIC_LINUX_SUBVERSION 52
- #define QLCNIC_LINUX_VERSIONID  "5.3.52"
+ #define _QLCNIC_LINUX_SUBVERSION 53
+ #define QLCNIC_LINUX_VERSIONID  "5.3.53"
  #define QLCNIC_DRV_IDC_VER  0x01
  #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
  		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@@ -115,6 -115,10 +115,10 @@@ enum qlcnic_queue_type 
  #define QLCNIC_VNIC_MODE	0xFF
  #define QLCNIC_DEFAULT_MODE	0x0
  
+ /* Virtual NIC function count */
+ #define QLC_DEFAULT_VNIC_COUNT	8
+ #define QLC_84XX_VNIC_COUNT	16
+ 
  /*
   * Following are the states of the Phantom. Phantom will set them and
   * Host will read to check if the fields are correct.
@@@ -374,7 -378,7 +378,7 @@@ struct qlcnic_rx_buffer 
  
  #define QLCNIC_INTR_DEFAULT			0x04
  #define QLCNIC_CONFIG_INTR_COALESCE		3
- #define QLCNIC_DEV_INFO_SIZE			1
+ #define QLCNIC_DEV_INFO_SIZE			2
  
  struct qlcnic_nic_intr_coalesce {
  	u8	type;
@@@ -462,8 -466,10 +466,10 @@@ struct qlcnic_hardware_context 
  	u16 max_rx_ques;
  	u16 max_mtu;
  	u32 msg_enable;
- 	u16 act_pci_func;
+ 	u16 total_nic_func;
  	u16 max_pci_func;
+ 	u32 max_vnic_func;
+ 	u32 total_pci_func;
  
  	u32 capabilities;
  	u32 extra_capability[3];
@@@ -487,7 -493,6 +493,7 @@@
  	struct qlcnic_mailbox *mailbox;
  	u8 extend_lb_time;
  	u8 phys_port_id[ETH_ALEN];
 +	u8 lb_mode;
  };
  
  struct qlcnic_adapter_stats {
@@@ -579,8 -584,6 +585,8 @@@ struct qlcnic_host_tx_ring 
  	dma_addr_t phys_addr;
  	dma_addr_t hw_cons_phys_addr;
  	struct netdev_queue *txq;
 +	/* Lock to protect Tx descriptors cleanup */
 +	spinlock_t tx_clean_lock;
  } ____cacheline_internodealigned_in_smp;
  
  /*
@@@ -791,9 -794,10 +797,10 @@@ struct qlcnic_cardrsp_tx_ctx 
  #define QLCNIC_MAC_VLAN_ADD	3
  #define QLCNIC_MAC_VLAN_DEL	4
  
- struct qlcnic_mac_list_s {
+ struct qlcnic_mac_vlan_list {
  	struct list_head list;
  	uint8_t mac_addr[ETH_ALEN+2];
+ 	u16 vlan_id;
  };
  
  /* MAC Learn */
@@@ -811,7 -815,6 +818,7 @@@
  
  #define QLCNIC_ILB_MODE		0x1
  #define QLCNIC_ELB_MODE		0x2
 +#define QLCNIC_LB_MODE_MASK	0x3
  
  #define QLCNIC_LINKEVENT	0x1
  #define QLCNIC_LB_RESPONSE	0x2
@@@ -860,7 -863,7 +867,7 @@@
  #define QLCNIC_FW_CAP2_HW_LRO_IPV6		BIT_3
  #define QLCNIC_FW_CAPABILITY_SET_DRV_VER	BIT_5
  #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
- #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_8
+ #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
  
  /* module types */
  #define LINKEVENT_MODULE_NOT_PRESENT			1
@@@ -1097,6 -1100,7 +1104,6 @@@ struct qlcnic_adapter 
  	struct qlcnic_filter_hash rx_fhash;
  	struct list_head vf_mc_list;
  
 -	spinlock_t tx_clean_lock;
  	spinlock_t mac_learn_lock;
  	/* spinlock for catching rcv filters for eswitch traffic */
  	spinlock_t rx_mac_learn_lock;
@@@ -1640,7 -1644,9 +1647,9 @@@ int qlcnic_setup_netdev(struct qlcnic_a
  void qlcnic_set_netdev_features(struct qlcnic_adapter *,
  				struct qlcnic_esw_func_cfg *);
  void qlcnic_sriov_vf_schedule_multi(struct net_device *);
- void qlcnic_vf_add_mc_list(struct net_device *, u16);
+ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+ int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ 			     u16 *);
  
  /*
   * QLOGIC Board information
@@@ -2139,4 -2145,26 +2148,26 @@@ static inline bool qlcnic_sriov_vf_chec
  
  	return status;
  }
+ 
+ static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+ {
+ 	unsigned short device = adapter->pdev->device;
+ 
+ 	return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+ }
+ 
+ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+ {
+ 	unsigned short device = adapter->pdev->device;
+ 
+ 	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ }
+ 
+ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+ {
+ 	if (qlcnic_84xx_check(adapter))
+ 		return QLC_84XX_VNIC_COUNT;
+ 	else
+ 		return QLC_DEFAULT_VNIC_COUNT;
+ }
  #endif				/* __QLCNIC_H_ */
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f776f99,b3fd160..03eb2ad
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@@ -15,6 -15,7 +15,7 @@@
  
  #define RSS_HASHTYPE_IP_TCP		0x3
  #define QLC_83XX_FW_MBX_CMD		0
+ #define QLC_SKIP_INACTIVE_PCI_REGS	7
  
  static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
  	{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@@ -34,7 -35,7 +35,7 @@@
  	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
  	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
  	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
- 	{QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ 	{QLCNIC_CMD_GET_PCI_INFO, 1, 129},
  	{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
  	{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
  	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@@ -68,7 -69,7 +69,7 @@@
  	{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
  	{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
  	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
- 	{QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+ 	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
  };
  
  const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@@ -289,6 -290,7 +290,7 @@@ int qlcnic_83xx_setup_intr(struct qlcni
  		if (qlcnic_sriov_vf_check(adapter))
  			return -EINVAL;
  		num_msix = 1;
+ 		adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
  	}
  	/* setup interrupt mapping table for fw */
  	ahw->intr_tbl = vzalloc(num_msix *
@@@ -315,12 -317,12 +317,12 @@@
  	return 0;
  }
  
- inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+ static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
  {
  	writel(0, adapter->tgt_mask_reg);
  }
  
- inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+ static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
  {
  	if (adapter->tgt_mask_reg)
  		writel(1, adapter->tgt_mask_reg);
@@@ -340,7 -342,7 +342,7 @@@ void qlcnic_83xx_disable_intr(struct ql
  	writel(1, sds_ring->crb_intr_mask);
  }
  
- inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
  						    *adapter)
  {
  	u32 mask;
@@@ -637,7 -639,7 +639,7 @@@ int qlcnic_83xx_get_port_info(struct ql
  void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
- 	u16 act_pci_fn = ahw->act_pci_func;
+ 	u16 act_pci_fn = ahw->total_nic_func;
  	u16 count;
  
  	ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@@ -1498,8 -1500,7 +1500,7 @@@ int  qlcnic_83xx_set_led(struct net_dev
  	return err;
  }
  
- void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
- 				       int enable)
+ void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
  {
  	struct qlcnic_cmd_args cmd;
  	int status;
@@@ -1507,21 -1508,21 +1508,21 @@@
  	if (qlcnic_sriov_vf_check(adapter))
  		return;
  
- 	if (enable) {
+ 	if (enable)
  		status = qlcnic_alloc_mbx_args(&cmd, adapter,
  					       QLCNIC_CMD_INIT_NIC_FUNC);
- 		if (status)
- 			return;
- 
- 		cmd.req.arg[1] = BIT_0 | BIT_31;
- 	} else {
+ 	else
  		status = qlcnic_alloc_mbx_args(&cmd, adapter,
  					       QLCNIC_CMD_STOP_NIC_FUNC);
- 		if (status)
- 			return;
  
- 		cmd.req.arg[1] = BIT_0 | BIT_31;
- 	}
+ 	if (status)
+ 		return;
+ 
+ 	cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+ 
+ 	if (adapter->dcb)
+ 		cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+ 
  	status = qlcnic_issue_cmd(adapter, &cmd);
  	if (status)
  		dev_err(&adapter->pdev->dev,
@@@ -1617,7 -1618,7 +1618,7 @@@ int qlcnic_83xx_nic_set_promisc(struct 
  
  	cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
  	qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- 	cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ 	cmd->req.arg[1] = mode | temp;
  	err = qlcnic_issue_cmd(adapter, cmd);
  	if (!err)
  		return err;
@@@ -1684,6 -1685,12 +1685,6 @@@ int qlcnic_83xx_loopback_test(struct ne
  		}
  	} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
  
 -	/* Make sure carrier is off and queue is stopped during loopback */
 -	if (netif_running(netdev)) {
 -		netif_carrier_off(netdev);
 -		netif_tx_stop_all_queues(netdev);
 -	}
 -
  	ret = qlcnic_do_lb_test(adapter, mode);
  
  	qlcnic_83xx_clear_lb_mode(adapter, mode);
@@@ -2115,7 -2122,6 +2116,7 @@@ static void qlcnic_83xx_handle_link_aen
  	ahw->link_autoneg = MSB(MSW(data[3]));
  	ahw->module_type = MSB(LSW(data[3]));
  	ahw->has_link_events = 1;
 +	ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
  	qlcnic_advert_link_change(adapter, link_status);
  }
  
@@@ -2268,11 -2274,37 +2269,37 @@@ out
  	return err;
  }
  
+ int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ 			     u16 *nic, u16 *fcoe, u16 *iscsi)
+ {
+ 	struct device *dev = &adapter->pdev->dev;
+ 	int err = 0;
+ 
+ 	switch (type) {
+ 	case QLCNIC_TYPE_NIC:
+ 		(*nic)++;
+ 		break;
+ 	case QLCNIC_TYPE_FCOE:
+ 		(*fcoe)++;
+ 		break;
+ 	case QLCNIC_TYPE_ISCSI:
+ 		(*iscsi)++;
+ 		break;
+ 	default:
+ 		dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ 			__func__, type);
+ 		err = -EIO;
+ 	}
+ 
+ 	return err;
+ }
+ 
  int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
  			     struct qlcnic_pci_info *pci_info)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct device *dev = &adapter->pdev->dev;
+ 	u16 nic = 0, fcoe = 0, iscsi = 0;
  	struct qlcnic_cmd_args cmd;
  	int i, err = 0, j = 0;
  	u32 temp;
@@@ -2283,16 -2315,20 +2310,20 @@@
  
  	err = qlcnic_issue_cmd(adapter, &cmd);
  
- 	ahw->act_pci_func = 0;
+ 	ahw->total_nic_func = 0;
  	if (err == QLCNIC_RCODE_SUCCESS) {
  		ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
- 		for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ 		for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
  			pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
  			pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
  			i++;
+ 			if (!pci_info->active) {
+ 				i += QLC_SKIP_INACTIVE_PCI_REGS;
+ 				continue;
+ 			}
  			pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
- 			if (pci_info->type == QLCNIC_TYPE_NIC)
- 				ahw->act_pci_func++;
+ 			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ 						       &nic, &fcoe, &iscsi);
  			temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
  			pci_info->default_port = temp;
  			i++;
@@@ -2310,6 -2346,13 +2341,13 @@@
  		err = -EIO;
  	}
  
+ 	ahw->total_nic_func = nic;
+ 	ahw->total_pci_func = nic + fcoe + iscsi;
+ 	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ 		dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ 			__func__, ahw->total_nic_func, ahw->total_pci_func);
+ 		err = -EIO;
+ 	}
  	qlcnic_free_mbx_args(&cmd);
  
  	return err;
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index ad1531a,a215e0f..6373f60
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@@ -127,7 -127,7 +127,7 @@@
  struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
  				     struct qlcnic_host_rds_ring *, u16, u16);
  
- inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
  				  struct qlcnic_host_tx_ring *tx_ring)
  {
  	if (qlcnic_check_multi_tx(adapter) &&
@@@ -144,13 -144,13 +144,13 @@@ static inline void qlcnic_disable_tx_in
  		writel(1, tx_ring->crb_intr_mask);
  }
  
- inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ static inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
  				       struct qlcnic_host_tx_ring *tx_ring)
  {
  	writel(0, tx_ring->crb_intr_mask);
  }
  
- inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ static inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
  					struct qlcnic_host_tx_ring *tx_ring)
  {
  	writel(1, tx_ring->crb_intr_mask);
@@@ -202,7 -202,7 +202,7 @@@ static struct qlcnic_filter *qlcnic_fin
  	struct hlist_node *n;
  
  	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- 		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ 		if (ether_addr_equal(tmp_fil->faddr, addr) &&
  		    tmp_fil->vlan_id == vlan_id)
  			return tmp_fil;
  	}
@@@ -346,7 -346,7 +346,7 @@@ static void qlcnic_send_filter(struct q
  	head = &(adapter->fhash.fhead[hindex]);
  
  	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- 		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ 		if (ether_addr_equal(tmp_fil->faddr, &src_addr) &&
  		    tmp_fil->vlan_id == vlan_id) {
  			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
  				qlcnic_change_filter(adapter, &src_addr,
@@@ -689,10 -689,6 +689,10 @@@ void qlcnic_advert_link_change(struct q
  		adapter->ahw->linkup = 0;
  		netif_carrier_off(netdev);
  	} else if (!adapter->ahw->linkup && linkup) {
 +		/* Do not advertise Link up if the port is in loopback mode */
 +		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
 +			return;
 +
  		netdev_info(netdev, "NIC Link is up\n");
  		adapter->ahw->linkup = 1;
  		netif_carrier_on(netdev);
@@@ -782,7 -778,7 +782,7 @@@ static int qlcnic_process_cmd_ring(stru
  	struct net_device *netdev = adapter->netdev;
  	struct qlcnic_skb_frag *frag;
  
 -	if (!spin_trylock(&adapter->tx_clean_lock))
 +	if (!spin_trylock(&tx_ring->tx_clean_lock))
  		return 1;
  
  	sw_consumer = tx_ring->sw_consumer;
@@@ -811,9 -807,8 +811,9 @@@
  			break;
  	}
  
 +	tx_ring->sw_consumer = sw_consumer;
 +
  	if (count && netif_running(netdev)) {
 -		tx_ring->sw_consumer = sw_consumer;
  		smp_mb();
  		if (netif_tx_queue_stopped(tx_ring->txq) &&
  		    netif_carrier_ok(netdev)) {
@@@ -839,8 -834,7 +839,8 @@@
  	 */
  	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
  	done = (sw_consumer == hw_consumer);
 -	spin_unlock(&adapter->tx_clean_lock);
 +
 +	spin_unlock(&tx_ring->tx_clean_lock);
  
  	return done;
  }
@@@ -1466,8 -1460,7 +1466,7 @@@ int qlcnic_82xx_napi_add(struct qlcnic_
  	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
  		sds_ring = &recv_ctx->sds_rings[ring];
  		if (qlcnic_check_multi_tx(adapter) &&
- 		    !adapter->ahw->diag_test &&
- 		    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ 		    !adapter->ahw->diag_test) {
  			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
  				       NAPI_POLL_WEIGHT);
  		} else {
@@@ -1540,8 -1533,7 +1539,7 @@@ void qlcnic_82xx_napi_enable(struct qlc
  
  	if (qlcnic_check_multi_tx(adapter) &&
  	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
- 	    !adapter->ahw->diag_test &&
- 	    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ 	    !adapter->ahw->diag_test) {
  		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
  			tx_ring = &adapter->tx_ring[ring];
  			napi_enable(&tx_ring->napi);
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b8a245a,d131ec1..eeec83a
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@@ -308,12 -308,12 +308,12 @@@ int qlcnic_read_mac_addr(struct qlcnic_
  
  static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
  {
- 	struct qlcnic_mac_list_s *cur;
+ 	struct qlcnic_mac_vlan_list *cur;
  	struct list_head *head;
  
  	list_for_each(head, &adapter->mac_list) {
- 		cur = list_entry(head, struct qlcnic_mac_list_s, list);
- 		if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ 		cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ 		if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
  			qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
  						  0, QLCNIC_MAC_DEL);
  			list_del(&cur->list);
@@@ -337,7 -337,7 +337,7 @@@ static int qlcnic_set_mac(struct net_de
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EINVAL;
  
- 	if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ 	if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
  		return 0;
  
  	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@@ -646,8 -646,7 +646,7 @@@ int qlcnic_enable_msix(struct qlcnic_ad
  			} else {
  				adapter->ahw->num_msix = num_msix;
  				if (qlcnic_check_multi_tx(adapter) &&
- 				    !adapter->ahw->diag_test &&
- 				    (adapter->drv_tx_rings > 1))
+ 				    !adapter->ahw->diag_test)
  					drv_sds_rings = num_msix - drv_tx_rings;
  				else
  					drv_sds_rings = num_msix;
@@@ -800,25 -799,26 +799,26 @@@ static void qlcnic_cleanup_pci_map(stru
  
  static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
  {
+ 	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct qlcnic_pci_info *pci_info;
  	int ret;
  
  	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- 		switch (adapter->ahw->port_type) {
+ 		switch (ahw->port_type) {
  		case QLCNIC_GBE:
- 			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ 			ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
  			break;
  		case QLCNIC_XGBE:
- 			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ 			ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
  			break;
  		}
  		return 0;
  	}
  
- 	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ 	if (ahw->op_mode == QLCNIC_MGMT_FUNC)
  		return 0;
  
- 	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ 	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
  	if (!pci_info)
  		return -ENOMEM;
  
@@@ -846,12 -846,13 +846,13 @@@ static bool qlcnic_port_eswitch_cfg_cap
  
  int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
  {
+ 	struct qlcnic_hardware_context *ahw = adapter->ahw;
  	struct qlcnic_pci_info *pci_info;
  	int i, id = 0, ret = 0, j = 0;
  	u16 act_pci_func;
  	u8 pfn;
  
- 	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ 	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
  	if (!pci_info)
  		return -ENOMEM;
  
@@@ -859,7 -860,7 +860,7 @@@
  	if (ret)
  		goto err_pci_info;
  
- 	act_pci_func = adapter->ahw->act_pci_func;
+ 	act_pci_func = ahw->total_nic_func;
  
  	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
  				 act_pci_func, GFP_KERNEL);
@@@ -875,10 -876,10 +876,10 @@@
  		goto err_npars;
  	}
  
- 	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ 	for (i = 0; i < ahw->max_vnic_func; i++) {
  		pfn = pci_info[i].id;
  
- 		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ 		if (pfn >= ahw->max_vnic_func) {
  			ret = QL_STATUS_INVALID_PARAM;
  			goto err_eswitch;
  		}
@@@ -1346,7 -1347,7 +1347,7 @@@ int qlcnic_set_default_offload_settings
  	if (adapter->need_fw_reset)
  		return 0;
  
- 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ 	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
  		if (!adapter->npars[i].eswitch_status)
  			continue;
  
@@@ -1409,7 -1410,7 +1410,7 @@@ int qlcnic_reset_npar_config(struct qlc
  			return 0;
  
  	/* Set the NPAR config data after FW reset */
- 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ 	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
  		npar = &adapter->npars[i];
  		pci_func = npar->pci_func;
  		if (!adapter->npars[i].eswitch_status)
@@@ -1756,6 -1757,7 +1757,6 @@@ void __qlcnic_down(struct qlcnic_adapte
  	if (qlcnic_sriov_vf_check(adapter))
  		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
  	smp_mb();
 -	spin_lock(&adapter->tx_clean_lock);
  	netif_carrier_off(netdev);
  	adapter->ahw->linkup = 0;
  	netif_tx_disable(netdev);
@@@ -1776,6 -1778,7 +1777,6 @@@
  
  	for (ring = 0; ring < adapter->drv_tx_rings; ring++)
  		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
 -	spin_unlock(&adapter->tx_clean_lock);
  }
  
  /* Usage: During suspend and firmware recovery module */
@@@ -2035,7 -2038,7 +2036,7 @@@ qlcnic_reset_context(struct qlcnic_adap
  void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
  	struct qlcnic_hardware_context *ahw = adapter->ahw;
- 	u16 act_pci_fn = ahw->act_pci_func;
+ 	u16 act_pci_fn = ahw->total_nic_func;
  	u16 count;
  
  	ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@@ -2170,7 -2173,6 +2171,7 @@@ int qlcnic_alloc_tx_rings(struct qlcnic
  		}
  		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
  		tx_ring->cmd_buf_arr = cmd_buf_arr;
 +		spin_lock_init(&tx_ring->tx_clean_lock);
  	}
  
  	if (qlcnic_83xx_check(adapter) ||
@@@ -2211,7 -2213,6 +2212,6 @@@ qlcnic_probe(struct pci_dev *pdev, cons
  	struct qlcnic_hardware_context *ahw;
  	int err, pci_using_dac = -1;
  	char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- 	struct qlcnic_dcb *dcb;
  
  	if (pdev->is_virtfn)
  		return -ENODEV;
@@@ -2289,7 -2290,8 +2289,8 @@@
  		goto err_out_free_wq;
  
  	adapter->dev_rst_time = jiffies;
- 	adapter->ahw->revision_id = pdev->revision;
+ 	ahw->revision_id = pdev->revision;
+ 	ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
  	if (qlcnic_mac_learn == FDB_MAC_LEARN)
  		adapter->fdb_mac_learn = true;
  	else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@@ -2298,6 -2300,7 +2299,6 @@@
  	rwlock_init(&adapter->ahw->crb_lock);
  	mutex_init(&adapter->ahw->mem_lock);
  
 -	spin_lock_init(&adapter->tx_clean_lock);
  	INIT_LIST_HEAD(&adapter->mac_list);
  
  	qlcnic_register_dcb(adapter);
@@@ -2333,10 -2336,6 +2334,6 @@@
  
  		adapter->flags |= QLCNIC_NEED_FLR;
  
- 		dcb = adapter->dcb;
- 
- 		if (dcb && qlcnic_dcb_attach(dcb))
- 			qlcnic_clear_dcb_ops(dcb);
  	} else if (qlcnic_83xx_check(adapter)) {
  		qlcnic_83xx_check_vf(adapter, ent);
  		adapter->portnum = adapter->ahw->pci_func;
@@@ -2365,6 -2364,8 +2362,8 @@@
  		goto err_out_free_hw;
  	}
  
+ 	qlcnic_dcb_enable(adapter->dcb);
+ 
  	if (qlcnic_read_mac_addr(adapter))
  		dev_warn(&pdev->dev, "failed to read mac addr\n");
  
@@@ -2498,13 -2499,11 +2497,11 @@@ static void qlcnic_remove(struct pci_de
  	qlcnic_cancel_idc_work(adapter);
  	ahw = adapter->ahw;
  
- 	qlcnic_dcb_free(adapter->dcb);
- 
  	unregister_netdev(netdev);
  	qlcnic_sriov_cleanup(adapter);
  
  	if (qlcnic_83xx_check(adapter)) {
- 		qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ 		qlcnic_83xx_initialize_nic(adapter, 0);
  		cancel_delayed_work_sync(&adapter->idc_aen_work);
  		qlcnic_83xx_free_mbx_intr(adapter);
  		qlcnic_83xx_detach_mailbox_work(adapter);
@@@ -2512,6 -2511,8 +2509,8 @@@
  		kfree(ahw->fw_info);
  	}
  
+ 	qlcnic_dcb_free(adapter->dcb);
+ 
  	qlcnic_detach(adapter);
  
  	if (adapter->npars != NULL)
@@@ -2640,7 -2641,7 +2639,7 @@@ void qlcnic_alloc_lb_filters_mem(struc
  	if (adapter->fhash.fmax && adapter->fhash.fhead)
  		return;
  
- 	act_pci_func = adapter->ahw->act_pci_func;
+ 	act_pci_func = adapter->ahw->total_nic_func;
  	spin_lock_init(&adapter->mac_learn_lock);
  	spin_lock_init(&adapter->rx_mac_learn_lock);
  
@@@ -3723,12 -3724,6 +3722,6 @@@ int qlcnic_validate_rings(struct qlcnic
  		return -EINVAL;
  	}
  
- 	if (ring_cnt < 2) {
- 		netdev_err(netdev,
- 			   "%s rings value should not be lower than 2\n", buf);
- 		return -EINVAL;
- 	}
- 
  	if (!is_power_of_2(ring_cnt)) {
  		netdev_err(netdev, "%s rings value should be a power of 2\n",
  			   buf);
@@@ -3786,8 -3781,7 +3779,7 @@@ int qlcnic_setup_rings(struct qlcnic_ad
  	}
  
  	if (qlcnic_83xx_check(adapter)) {
- 		/* register for NIC IDC AEN Events */
- 		qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ 		qlcnic_83xx_initialize_nic(adapter, 1);
  		err = qlcnic_83xx_setup_mbx_intr(adapter);
  		qlcnic_83xx_disable_mbx_poll(adapter);
  		if (err) {
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 024f816,98b621f..1fa383b
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@@ -9,7 -9,7 +9,7 @@@
  #include "qlcnic.h"
  #include <linux/types.h>
  
- #define QLCNIC_SRIOV_VF_MAX_MAC 1
+ #define QLCNIC_SRIOV_VF_MAX_MAC 8
  #define QLC_VF_MIN_TX_RATE	100
  #define QLC_VF_MAX_TX_RATE	9999
  
@@@ -64,9 -64,10 +64,10 @@@ static int qlcnic_sriov_pf_cal_res_limi
  {
  	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
  	struct qlcnic_resources *res = &sriov->ff_max;
- 	u32 temp, num_vf_macs, num_vfs, max;
+ 	u16 num_macs = sriov->num_allowed_vlans + 1;
  	int ret = -EIO, vpid, id;
  	struct qlcnic_vport *vp;
+ 	u32 num_vfs, max, temp;
  
  	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
  	if (vpid < 0)
@@@ -75,17 -76,22 +76,22 @@@
  	num_vfs = sriov->num_vfs;
  	max = num_vfs + 1;
  	info->bit_offsets = 0xffff;
- 	info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
- 	num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
 -	info->max_tx_ques = res->num_tx_queues / max;
+ 
+ 	if (qlcnic_83xx_pf_check(adapter))
+ 		num_macs = 1;
  
  	if (adapter->ahw->pci_func == func) {
- 		temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
- 		info->max_rx_ucast_mac_filters = temp;
- 		temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
- 		info->max_tx_mac_filters = temp;
  		info->min_tx_bw = 0;
  		info->max_tx_bw = MAX_BW;
 +		info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
+ 		temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ 		info->max_rx_ucast_mac_filters = temp;
+ 		temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ 		info->max_tx_mac_filters = temp;
+ 		temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ 		temp = res->num_rx_mcast_mac_filters - temp;
+ 		info->max_rx_mcast_mac_filters = temp;
+ 
  	} else {
  		id = qlcnic_sriov_func_to_index(adapter, func);
  		if (id < 0)
@@@ -93,9 -99,10 +99,11 @@@
  		vp = sriov->vf_info[id].vp;
  		info->min_tx_bw = vp->min_tx_bw;
  		info->max_tx_bw = vp->max_tx_bw;
- 		info->max_rx_ucast_mac_filters = num_vf_macs;
- 		info->max_tx_mac_filters = num_vf_macs;
+ 		info->max_rx_ucast_mac_filters = num_macs;
+ 		info->max_tx_mac_filters = num_macs;
 +		info->max_tx_ques = QLCNIC_SINGLE_RING;
+ 		temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ 		info->max_rx_mcast_mac_filters = temp;
  	}
  
  	info->max_rx_ip_addr = res->num_destip / max;
@@@ -133,6 -140,25 +141,25 @@@ static void qlcnic_sriov_pf_set_ff_max_
  	ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
  }
  
+ static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ 					 struct qlcnic_info *npar_info)
+ {
+ 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ 	int temp, total_fn;
+ 
+ 	temp = npar_info->max_rx_mcast_mac_filters;
+ 	total_fn = sriov->num_vfs + 1;
+ 
+ 	temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ 	sriov->num_allowed_vlans = temp - 1;
+ 
+ 	if (qlcnic_83xx_pf_check(adapter))
+ 		sriov->num_allowed_vlans = 1;
+ 
+ 	netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ 		    sriov->num_allowed_vlans);
+ }
+ 
  static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
  				    struct qlcnic_info *npar_info)
  {
@@@ -166,6 -192,7 +193,7 @@@
  	npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
  	npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
  
+ 	qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
  	qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
  	dev_info(&adapter->pdev->dev,
  		 "\n\ttotal_pf: %d,\n"
@@@ -404,6 -431,8 +432,8 @@@ static int qlcnic_pci_sriov_disable(str
  
  	qlcnic_sriov_pf_disable(adapter);
  
+ 	qlcnic_sriov_free_vlans(adapter);
+ 
  	qlcnic_sriov_pf_cleanup(adapter);
  
  	/* After disabling SRIOV re-init the driver in default mode
@@@ -512,6 -541,8 +542,8 @@@ static int __qlcnic_pci_sriov_enable(st
  	if (err)
  		goto del_flr_queue;
  
+ 	qlcnic_sriov_alloc_vlans(adapter);
+ 
  	err = qlcnic_sriov_pf_enable(adapter, num_vfs);
  	return err;
  
@@@ -609,7 -640,7 +641,7 @@@ static int qlcnic_sriov_set_vf_acl(stru
  
  	if (vp->vlan_mode == QLC_PVID_MODE) {
  		cmd.req.arg[2] |= BIT_6;
- 		cmd.req.arg[3] |= vp->vlan << 8;
+ 		cmd.req.arg[3] |= vp->pvid << 8;
  	}
  
  	err = qlcnic_issue_cmd(adapter, &cmd);
@@@ -644,10 -675,13 +676,13 @@@ static int qlcnic_sriov_pf_channel_cfg_
  	struct qlcnic_vf_info *vf = trans->vf;
  	struct qlcnic_vport *vp = vf->vp;
  	struct qlcnic_adapter *adapter;
+ 	struct qlcnic_sriov *sriov;
  	u16 func = vf->pci_func;
+ 	size_t size;
  	int err;
  
  	adapter = vf->adapter;
+ 	sriov = adapter->ahw->sriov;
  
  	if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
  		err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@@ -657,8 -691,12 +692,12 @@@
  				qlcnic_sriov_pf_config_vport(adapter, 0, func);
  		}
  	} else {
- 		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- 			vp->vlan = 0;
+ 		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ 			size = sizeof(*vf->sriov_vlans);
+ 			size = size * sriov->num_allowed_vlans;
+ 			memset(vf->sriov_vlans, 0, size);
+ 		}
+ 
  		err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
  	}
  
@@@ -680,20 -718,23 +719,23 @@@ err_out
  }
  
  static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
- 				       struct qlcnic_vport *vp,
- 				       u16 func, u16 vlan, u8 op)
+ 				       struct qlcnic_vf_info *vf,
+ 				       u16 vlan, u8 op)
  {
  	struct qlcnic_cmd_args cmd;
  	struct qlcnic_macvlan_mbx mv;
+ 	struct qlcnic_vport *vp;
  	u8 *addr;
  	int err;
  	u32 *buf;
  	int vpid;
  
+ 	vp = vf->vp;
+ 
  	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
  		return -ENOMEM;
  
- 	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ 	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
  	if (vpid < 0) {
  		err = -EINVAL;
  		goto out;
@@@ -737,6 -778,35 +779,35 @@@ static int qlcnic_sriov_validate_create
  	return 0;
  }
  
+ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ 					     struct qlcnic_vf_info *vf,
+ 					     int opcode)
+ {
+ 	struct qlcnic_sriov *sriov;
+ 	u16 vlan;
+ 	int i;
+ 
+ 	sriov = adapter->ahw->sriov;
+ 
+ 	mutex_lock(&vf->vlan_list_lock);
+ 	if (vf->num_vlan) {
+ 		for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ 			vlan = vf->sriov_vlans[i];
+ 			if (vlan)
+ 				qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ 							    opcode);
+ 		}
+ 	}
+ 	mutex_unlock(&vf->vlan_list_lock);
+ 
+ 	if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ 		if (qlcnic_83xx_pf_check(adapter) &&
+ 		    qlcnic_sriov_check_any_vlan(vf))
+ 			return;
+ 		qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ 	}
+ }
+ 
  static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
  					     struct qlcnic_cmd_args *cmd)
  {
@@@ -744,7 -814,6 +815,6 @@@
  	struct qlcnic_adapter *adapter = vf->adapter;
  	struct qlcnic_rcv_mbx_out *mbx_out;
  	int err;
- 	u16 vlan;
  
  	err = qlcnic_sriov_validate_create_rx_ctx(cmd);
  	if (err) {
@@@ -755,12 -824,10 +825,10 @@@
  	cmd->req.arg[6] = vf->vp->handle;
  	err = qlcnic_issue_cmd(adapter, cmd);
  
  	if (!err) {
  		mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
  		vf->rx_ctx_id = mbx_out->ctx_id;
- 		qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- 					    vlan, QLCNIC_MAC_ADD);
+ 		qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
  	} else {
  		vf->rx_ctx_id = 0;
  	}
@@@ -844,7 -911,6 +912,6 @@@ static int qlcnic_sriov_pf_del_rx_ctx_c
  	struct qlcnic_vf_info *vf = trans->vf;
  	struct qlcnic_adapter *adapter = vf->adapter;
  	int err;
- 	u16 vlan;
  
  	err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
  	if (err) {
@@@ -852,9 -918,7 +919,7 @@@
  		return err;
  	}
  
- 	vlan = vf->vp->vlan;
- 	qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- 				    vlan, QLCNIC_MAC_DEL);
+ 	qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
  	cmd->req.arg[1] |= vf->vp->handle << 16;
  	err = qlcnic_issue_cmd(adapter, cmd);
  
@@@ -1121,7 -1185,7 +1186,7 @@@ static int qlcnic_sriov_validate_cfg_ma
  		cmd->req.arg[1] &= ~0x7;
  		new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
  			 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- 		cmd->req.arg[3] |= vp->vlan << 16;
+ 		cmd->req.arg[3] |= vp->pvid << 16;
  		cmd->req.arg[1] |= new_op;
  	}
  
@@@ -1191,8 -1255,10 +1256,10 @@@ static int qlcnic_sriov_pf_get_acl_cmd(
  	struct qlcnic_vport *vp = vf->vp;
  	u8 cmd_op, mode = vp->vlan_mode;
  	struct qlcnic_adapter *adapter;
+ 	struct qlcnic_sriov *sriov;
  
  	adapter = vf->adapter;
+ 	sriov = adapter->ahw->sriov;
  
  	cmd_op = trans->req_hdr->cmd_op;
  	cmd->rsp.arg[0] |= 1 << 25;
@@@ -1206,10 -1272,10 +1273,10 @@@
  	switch (mode) {
  	case QLC_GUEST_VLAN_MODE:
  		cmd->rsp.arg[1] = mode | 1 << 8;
- 		cmd->rsp.arg[2] = 1 << 16;
+ 		cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
  		break;
  	case QLC_PVID_MODE:
- 		cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ 		cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
  		break;
  	}
  
@@@ -1217,24 -1283,27 +1284,27 @@@
  }
  
  static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
- 					  struct qlcnic_vf_info *vf)
- 
+ 					  struct qlcnic_vf_info *vf,
+ 					  struct qlcnic_cmd_args *cmd)
  {
- 	struct qlcnic_vport *vp = vf->vp;
+ 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ 	u16 vlan;
  
- 	if (!vp->vlan)
+ 	if (!qlcnic_sriov_check_any_vlan(vf))
  		return -EINVAL;
  
+ 	vlan = cmd->req.arg[1] >> 16;
  	if (!vf->rx_ctx_id) {
- 		vp->vlan = 0;
+ 		qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
  		return 0;
  	}
  
- 	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 				    vp->vlan, QLCNIC_MAC_DEL);
- 	vp->vlan = 0;
- 	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 				    0, QLCNIC_MAC_ADD);
+ 	qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ 	qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+ 
+ 	if (qlcnic_83xx_pf_check(adapter))
+ 		qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 					    0, QLCNIC_MAC_ADD);
  	return 0;
  }
  
@@@ -1242,32 -1311,37 +1312,37 @@@ static int qlcnic_sriov_pf_add_guest_vl
  					  struct qlcnic_vf_info *vf,
  					  struct qlcnic_cmd_args *cmd)
  {
- 	struct qlcnic_vport *vp = vf->vp;
+ 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
  	int err = -EIO;
+ 	u16 vlan;
  
- 	if (vp->vlan)
+ 	if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
  		return err;
  
+ 	vlan = cmd->req.arg[1] >> 16;
+ 
  	if (!vf->rx_ctx_id) {
- 		vp->vlan = cmd->req.arg[1] >> 16;
+ 		qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  		return 0;
  	}
  
- 	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 					  0, QLCNIC_MAC_DEL);
- 	if (err)
- 		return err;
+ 	if (qlcnic_83xx_pf_check(adapter)) {
+ 		err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ 						  QLCNIC_MAC_DEL);
+ 		if (err)
+ 			return err;
+ 	}
  
- 	vp->vlan = cmd->req.arg[1] >> 16;
- 	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 					  vp->vlan, QLCNIC_MAC_ADD);
+ 	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
  
  	if (err) {
- 		qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 					    0, QLCNIC_MAC_ADD);
- 		vp->vlan = 0;
+ 		if (qlcnic_83xx_pf_check(adapter))
+ 			qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ 						    QLCNIC_MAC_ADD);
+ 		return err;
  	}
  
+ 	qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  	return err;
  }
  
@@@ -1290,7 -1364,7 +1365,7 @@@ static int qlcnic_sriov_pf_cfg_guest_vl
  	if (op)
  		err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
  	else
- 		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+ 		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
  
  	cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
  	return err;
@@@ -1300,8 -1374,6 +1375,6 @@@ static const int qlcnic_pf_passthru_sup
  	QLCNIC_CMD_GET_STATISTICS,
  	QLCNIC_CMD_GET_PORT_CONFIG,
  	QLCNIC_CMD_GET_LINK_STATUS,
- 	QLCNIC_CMD_DCB_QUERY_CAP,
- 	QLCNIC_CMD_DCB_QUERY_PARAM,
  	QLCNIC_CMD_INIT_NIC_FUNC,
  	QLCNIC_CMD_STOP_NIC_FUNC,
  };
@@@ -1597,7 -1669,8 +1670,8 @@@ void qlcnic_sriov_pf_handle_flr(struct 
  	}
  
  	if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- 		vp->vlan = 0;
+ 		memset(vf->sriov_vlans, 0,
+ 		       sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
  
  	qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  	netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@@ -1767,20 -1840,22 +1841,22 @@@ int qlcnic_sriov_set_vf_vlan(struct net
  		return -EOPNOTSUPP;
  	}
  
+ 	memset(vf_info->sriov_vlans, 0,
+ 	       sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+ 
  	switch (vlan) {
  	case 4095:
- 		vp->vlan = 0;
  		vp->vlan_mode = QLC_GUEST_VLAN_MODE;
  		break;
  	case 0:
  		vp->vlan_mode = QLC_NO_VLAN_MODE;
- 		vp->vlan = 0;
  		vp->qos = 0;
  		break;
  	default:
  		vp->vlan_mode = QLC_PVID_MODE;
- 		vp->vlan = vlan;
+ 		qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
  		vp->qos = qos;
+ 		vp->pvid = vlan;
  	}
  
  	netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@@ -1795,7 -1870,7 +1871,7 @@@ static __u32 qlcnic_sriov_get_vf_vlan(s
  
  	switch (vp->vlan_mode) {
  	case QLC_PVID_MODE:
- 		vlan = vp->vlan;
+ 		vlan = vp->pvid;
  		break;
  	case QLC_GUEST_VLAN_MODE:
  		vlan = MAX_VLAN_ID;
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 797b56a,2161410..b8e3a4c
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -64,7 -64,7 +64,7 @@@ static int debug = -1
  module_param(debug, int, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  
- int phyaddr = -1;
+ static int phyaddr = -1;
  module_param(phyaddr, int, S_IRUGO);
  MODULE_PARM_DESC(phyaddr, "Physical device address");
  
@@@ -622,15 -622,17 +622,15 @@@ static int stmmac_init_ptp(struct stmma
  	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
  		return -EOPNOTSUPP;
  
 -	if (netif_msg_hw(priv)) {
 -		if (priv->dma_cap.time_stamp) {
 -			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
 -			priv->adv_ts = 0;
 -		}
 -		if (priv->dma_cap.atime_stamp && priv->extend_desc) {
 -			pr_debug
 -			    ("IEEE 1588-2008 Advanced Time Stamp supported\n");
 -			priv->adv_ts = 1;
 -		}
 -	}
 +	priv->adv_ts = 0;
 +	if (priv->dma_cap.atime_stamp && priv->extend_desc)
 +		priv->adv_ts = 1;
 +
 +	if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
 +		pr_debug("IEEE 1588-2002 Time Stamp supported\n");
 +
 +	if (netif_msg_hw(priv) && priv->adv_ts)
 +		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
  
  	priv->hw->ptp = &stmmac_ptp;
  	priv->hwts_tx_en = 0;
diff --combined drivers/net/ethernet/ti/cpsw.c
index 5330fd2,243fffb..e8bb77d
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@@ -582,7 -582,7 +582,7 @@@ static void cpsw_intr_disable(struct cp
  	return;
  }
  
- void cpsw_tx_handler(void *token, int len, int status)
+ static void cpsw_tx_handler(void *token, int len, int status)
  {
  	struct sk_buff		*skb = token;
  	struct net_device	*ndev = skb->dev;
@@@ -599,7 -599,7 +599,7 @@@
  	dev_kfree_skb_any(skb);
  }
  
- void cpsw_rx_handler(void *token, int len, int status)
+ static void cpsw_rx_handler(void *token, int len, int status)
  {
  	struct sk_buff		*skb = token;
  	struct sk_buff		*new_skb;
@@@ -740,8 -740,6 +740,8 @@@ static void _cpsw_adjust_link(struct cp
  		/* set speed_in input in case RMII mode is used in 100Mbps */
  		if (phy->speed == 100)
  			mac_control |= BIT(15);
 +		else if (phy->speed == 10)
 +			mac_control |= BIT(18); /* In Band mode */
  
  		*link = true;
  	} else {
@@@ -1331,7 -1329,7 +1331,7 @@@ static void cpsw_hwtstamp_v2(struct cps
  	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
  }
  
- static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
  {
  	struct cpsw_priv *priv = netdev_priv(dev);
  	struct cpts *cpts = priv->cpts;
@@@ -1392,6 -1390,24 +1392,24 @@@
  	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  }
  
+ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ {
+ 	struct cpsw_priv *priv = netdev_priv(dev);
+ 	struct cpts *cpts = priv->cpts;
+ 	struct hwtstamp_config cfg;
+ 
+ 	if (priv->version != CPSW_VERSION_1 &&
+ 	    priv->version != CPSW_VERSION_2)
+ 		return -EOPNOTSUPP;
+ 
+ 	cfg.flags = 0;
+ 	cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ 	cfg.rx_filter = (cpts->rx_enable ?
+ 			 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+ 
+ 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ }
+ 
  #endif /*CONFIG_TI_CPTS*/
  
  static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@@ -1406,7 -1422,9 +1424,9 @@@
  	switch (cmd) {
  #ifdef CONFIG_TI_CPTS
  	case SIOCSHWTSTAMP:
- 		return cpsw_hwtstamp_ioctl(dev, req);
+ 		return cpsw_hwtstamp_set(dev, req);
+ 	case SIOCGHWTSTAMP:
+ 		return cpsw_hwtstamp_get(dev, req);
  #endif
  	case SIOCGMIIPHY:
  		data->phy_id = priv->slaves[slave_no].phy->addr;
@@@ -2108,7 -2126,7 +2128,7 @@@ static int cpsw_probe(struct platform_d
  	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
  		for (i = res->start; i <= res->end; i++) {
  			if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
 -					     dev_name(priv->dev), priv)) {
 +					     dev_name(&pdev->dev), priv)) {
  				dev_err(priv->dev, "error attaching irq\n");
  				goto clean_ale_ret;
  			}
@@@ -2137,8 -2155,8 +2157,8 @@@
  			  data->cpts_clock_mult, data->cpts_clock_shift))
  		dev_err(priv->dev, "error registering cpts device\n");
  
- 	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- 		    ss_res->start, ndev->irq);
+ 	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+ 		    &ss_res->start, ndev->irq);
  
  	if (priv->data.dual_emac) {
  		ret = cpsw_probe_dual_emac(pdev, priv);
diff --combined drivers/net/hyperv/netvsc_drv.c
index 71baeb3,f80bd0c..7756118
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@@ -11,8 -11,7 +11,7 @@@
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
-  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-  * Place - Suite 330, Boston, MA 02111-1307 USA.
+  * this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * Authors:
   *   Haiyang Zhang <haiyangz at microsoft.com>
@@@ -261,7 -260,9 +260,7 @@@ int netvsc_recv_callback(struct hv_devi
  	struct sk_buff *skb;
  
  	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
 -	if (!net) {
 -		netdev_err(net, "got receive callback but net device"
 -			" not initialized yet\n");
 +	if (!net || net->reg_state != NETREG_REGISTERED) {
  		packet->status = NVSP_STAT_FAIL;
  		return 0;
  	}
@@@ -433,11 -434,19 +432,11 @@@ static int netvsc_probe(struct hv_devic
  	SET_ETHTOOL_OPS(net, &ethtool_ops);
  	SET_NETDEV_DEV(net, &dev->device);
  
 -	ret = register_netdev(net);
 -	if (ret != 0) {
 -		pr_err("Unable to register netdev.\n");
 -		free_netdev(net);
 -		goto out;
 -	}
 -
  	/* Notify the netvsc driver of the new device */
  	device_info.ring_size = ring_size;
  	ret = rndis_filter_device_add(dev, &device_info);
  	if (ret != 0) {
  		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
 -		unregister_netdev(net);
  		free_netdev(net);
  		hv_set_drvdata(dev, NULL);
  		return ret;
@@@ -446,13 -455,7 +445,13 @@@
  
  	netif_carrier_on(net);
  
 -out:
 +	ret = register_netdev(net);
 +	if (ret != 0) {
 +		pr_err("Unable to register netdev.\n");
 +		rndis_filter_device_remove(dev);
 +		free_netdev(net);
 +	}
 +
  	return ret;
  }
  
diff --combined drivers/net/macvlan.c
index 60406b0,9419836..09ababe
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -120,7 -120,7 +120,7 @@@ static int macvlan_broadcast_one(struc
  	struct net_device *dev = vlan->dev;
  
  	if (local)
- 		return vlan->forward(dev, skb);
+ 		return dev_forward_skb(dev, skb);
  
  	skb->dev = dev;
  	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@@ -128,7 -128,7 +128,7 @@@
  	else
  		skb->pkt_type = PACKET_MULTICAST;
  
- 	return vlan->receive(skb);
+ 	return netif_rx(skb);
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -251,7 -251,7 +251,7 @@@ static rx_handler_result_t macvlan_hand
  	skb->dev = dev;
  	skb->pkt_type = PACKET_HOST;
  
- 	ret = vlan->receive(skb);
+ 	ret = netif_rx(skb);
  
  out:
  	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@@ -290,8 -290,8 +290,8 @@@ xmit_world
  	return dev_queue_xmit(skb);
  }
  
- netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- 			       struct net_device *dev)
+ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
  {
  	unsigned int len = skb->len;
  	int ret;
@@@ -305,7 -305,7 +305,7 @@@
  	}
  
  	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- 		struct macvlan_pcpu_stats *pcpu_stats;
+ 		struct vlan_pcpu_stats *pcpu_stats;
  
  		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
  		u64_stats_update_begin(&pcpu_stats->syncp);
@@@ -317,7 -317,6 +317,6 @@@
  	}
  	return ret;
  }
- EXPORT_SYMBOL_GPL(macvlan_start_xmit);
  
  static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
  			       unsigned short type, const void *daddr,
@@@ -546,12 -545,12 +545,12 @@@ static int macvlan_init(struct net_devi
  
  	macvlan_set_lockdep_class(dev);
  
- 	vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ 	vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
  	if (!vlan->pcpu_stats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
- 		struct macvlan_pcpu_stats *mvlstats;
+ 		struct vlan_pcpu_stats *mvlstats;
  		mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
  		u64_stats_init(&mvlstats->syncp);
  	}
@@@ -577,7 -576,7 +576,7 @@@ static struct rtnl_link_stats64 *macvla
  	struct macvlan_dev *vlan = netdev_priv(dev);
  
  	if (vlan->pcpu_stats) {
- 		struct macvlan_pcpu_stats *p;
+ 		struct vlan_pcpu_stats *p;
  		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
  		u32 rx_errors = 0, tx_dropped = 0;
  		unsigned int start;
@@@ -690,19 -689,8 +689,19 @@@ static netdev_features_t macvlan_fix_fe
  					      netdev_features_t features)
  {
  	struct macvlan_dev *vlan = netdev_priv(dev);
 +	netdev_features_t mask;
  
 -	return features & (vlan->set_features | ~MACVLAN_FEATURES);
 +	features |= NETIF_F_ALL_FOR_ALL;
 +	features &= (vlan->set_features | ~MACVLAN_FEATURES);
 +	mask = features;
 +
 +	features = netdev_increment_features(vlan->lowerdev->features,
 +					     features,
 +					     mask);
 +	if (!vlan->fwd_priv)
 +		features |= NETIF_F_LLTX;
 +
 +	return features;
  }
  
  static const struct ethtool_ops macvlan_ethtool_ops = {
@@@ -814,10 -802,7 +813,7 @@@ static int macvlan_validate(struct nlat
  }
  
  int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- 			   struct nlattr *tb[], struct nlattr *data[],
- 			   int (*receive)(struct sk_buff *skb),
- 			   int (*forward)(struct net_device *dev,
- 					  struct sk_buff *skb))
+ 			   struct nlattr *tb[], struct nlattr *data[])
  {
  	struct macvlan_dev *vlan = netdev_priv(dev);
  	struct macvlan_port *port;
@@@ -831,13 -816,11 +827,11 @@@
  	if (lowerdev == NULL)
  		return -ENODEV;
  
- 	/* When creating macvlans on top of other macvlans - use
+ 	/* When creating macvlans or macvtaps on top of other macvlans - use
  	 * the real device as the lowerdev.
  	 */
- 	if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
- 		struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
- 		lowerdev = lowervlan->lowerdev;
- 	}
+ 	if (netif_is_macvlan(lowerdev))
+ 		lowerdev = macvlan_dev_real_dev(lowerdev);
  
  	if (!tb[IFLA_MTU])
  		dev->mtu = lowerdev->mtu;
@@@ -861,8 -844,6 +855,6 @@@
  	vlan->lowerdev = lowerdev;
  	vlan->dev      = dev;
  	vlan->port     = port;
- 	vlan->receive  = receive;
- 	vlan->forward  = forward;
  	vlan->set_features = MACVLAN_FEATURES;
  
  	vlan->mode     = MACVLAN_MODE_VEPA;
@@@ -907,9 -888,7 +899,7 @@@ EXPORT_SYMBOL_GPL(macvlan_common_newlin
  static int macvlan_newlink(struct net *src_net, struct net_device *dev,
  			   struct nlattr *tb[], struct nlattr *data[])
  {
- 	return macvlan_common_newlink(src_net, dev, tb, data,
- 				      netif_rx,
- 				      dev_forward_skb);
+ 	return macvlan_common_newlink(src_net, dev, tb, data);
  }
  
  void macvlan_dellink(struct net_device *dev, struct list_head *head)
@@@ -1030,8 -1009,9 +1020,8 @@@ static int macvlan_device_event(struct 
  		break;
  	case NETDEV_FEAT_CHANGE:
  		list_for_each_entry(vlan, &port->vlans, list) {
 -			vlan->dev->features = dev->features & MACVLAN_FEATURES;
  			vlan->dev->gso_max_size = dev->gso_max_size;
 -			netdev_features_change(vlan->dev);
 +			netdev_update_features(vlan->dev);
  		}
  		break;
  	case NETDEV_UNREGISTER:
diff --combined drivers/net/phy/phy.c
index 98434b8,19da5ab6..76e8936
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@@ -1,7 -1,4 +1,4 @@@
- /*
-  * drivers/net/phy/phy.c
-  *
-  * Framework for configuring and reading PHY devices
+ /* Framework for configuring and reading PHY devices
   * Based on code in sungem_phy.c and gianfar_phy.c
   *
   * Author: Andy Fleming
@@@ -36,11 -33,11 +33,11 @@@
  #include <linux/timer.h>
  #include <linux/workqueue.h>
  #include <linux/mdio.h>
- 
+ #include <linux/io.h>
+ #include <linux/uaccess.h>
  #include <linux/atomic.h>
- #include <asm/io.h>
+ 
  #include <asm/irq.h>
- #include <asm/uaccess.h>
  
  /**
   * phy_print_status - Convenience function to print out the current phy status
@@@ -48,13 -45,14 +45,14 @@@
   */
  void phy_print_status(struct phy_device *phydev)
  {
- 	if (phydev->link)
+ 	if (phydev->link) {
  		pr_info("%s - Link is Up - %d/%s\n",
  			dev_name(&phydev->dev),
  			phydev->speed,
  			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- 	else
+ 	} else	{
  		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ 	}
  }
  EXPORT_SYMBOL(phy_print_status);
  
@@@ -69,12 -67,10 +67,10 @@@
   */
  static int phy_clear_interrupt(struct phy_device *phydev)
  {
- 	int err = 0;
- 
  	if (phydev->drv->ack_interrupt)
- 		err = phydev->drv->ack_interrupt(phydev);
+ 		return phydev->drv->ack_interrupt(phydev);
  
- 	return err;
+ 	return 0;
  }
  
  /**
@@@ -86,13 -82,11 +82,11 @@@
   */
  static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  {
- 	int err = 0;
- 
  	phydev->interrupts = interrupts;
  	if (phydev->drv->config_intr)
- 		err = phydev->drv->config_intr(phydev);
+ 		return phydev->drv->config_intr(phydev);
  
- 	return err;
+ 	return 0;
  }
  
  
@@@ -106,15 -100,14 +100,14 @@@
   */
  static inline int phy_aneg_done(struct phy_device *phydev)
  {
- 	int retval;
- 
- 	retval = phy_read(phydev, MII_BMSR);
+ 	int retval = phy_read(phydev, MII_BMSR);
  
  	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
  }
  
  /* A structure for mapping a particular speed and duplex
-  * combination to a particular SUPPORTED and ADVERTISED value */
+  * combination to a particular SUPPORTED and ADVERTISED value
+  */
  struct phy_setting {
  	int speed;
  	int duplex;
@@@ -177,8 -170,7 +170,7 @@@ static inline int phy_find_setting(int 
  	int idx = 0;
  
  	while (idx < ARRAY_SIZE(settings) &&
- 			(settings[idx].speed != speed ||
- 			settings[idx].duplex != duplex))
+ 	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
  		idx++;
  
  	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@@ -245,8 -237,7 +237,7 @@@ int phy_ethtool_sset(struct phy_device 
  	if (cmd->phy_address != phydev->addr)
  		return -EINVAL;
  
- 	/* We make sure that we don't pass unsupported
- 	 * values in to the PHY */
+ 	/* We make sure that we don't pass unsupported values in to the PHY */
  	cmd->advertising &= phydev->supported;
  
  	/* Verify the settings we care about. */
@@@ -289,6 -280,7 +280,7 @@@ int phy_ethtool_gset(struct phy_device 
  	cmd->supported = phydev->supported;
  
  	cmd->advertising = phydev->advertising;
+ 	cmd->lp_advertising = phydev->lp_advertising;
  
  	ethtool_cmd_speed_set(cmd, phydev->speed);
  	cmd->duplex = phydev->duplex;
@@@ -312,8 -304,7 +304,7 @@@ EXPORT_SYMBOL(phy_ethtool_gset)
   * PHYCONTROL layer.  It changes registers without regard to
   * current state.  Use at own risk.
   */
- int phy_mii_ioctl(struct phy_device *phydev,
- 		struct ifreq *ifr, int cmd)
+ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
  {
  	struct mii_ioctl_data *mii_data = if_mii(ifr);
  	u16 val = mii_data->val_in;
@@@ -326,25 -317,24 +317,24 @@@
  	case SIOCGMIIREG:
  		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
  						 mii_data->reg_num);
- 		break;
+ 		return 0;
  
  	case SIOCSMIIREG:
  		if (mii_data->phy_id == phydev->addr) {
- 			switch(mii_data->reg_num) {
+ 			switch (mii_data->reg_num) {
  			case MII_BMCR:
- 				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+ 				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
  					phydev->autoneg = AUTONEG_DISABLE;
  				else
  					phydev->autoneg = AUTONEG_ENABLE;
- 				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+ 				if (!phydev->autoneg && (val & BMCR_FULLDPLX))
  					phydev->duplex = DUPLEX_FULL;
  				else
  					phydev->duplex = DUPLEX_HALF;
- 				if ((!phydev->autoneg) &&
- 						(val & BMCR_SPEED1000))
+ 				if (!phydev->autoneg && (val & BMCR_SPEED1000))
  					phydev->speed = SPEED_1000;
- 				else if ((!phydev->autoneg) &&
- 						(val & BMCR_SPEED100))
+ 				else if (!phydev->autoneg &&
+ 					 (val & BMCR_SPEED100))
  					phydev->speed = SPEED_100;
  				break;
  			case MII_ADVERTISE:
@@@ -360,12 -350,9 +350,9 @@@
  			      mii_data->reg_num, val);
  
  		if (mii_data->reg_num == MII_BMCR &&
- 		    val & BMCR_RESET &&
- 		    phydev->drv->config_init) {
- 			phy_scan_fixups(phydev);
- 			phydev->drv->config_init(phydev);
- 		}
- 		break;
+ 		    val & BMCR_RESET)
+ 			return phy_init_hw(phydev);
+ 		return 0;
  
  	case SIOCSHWTSTAMP:
  		if (phydev->drv->hwtstamp)
@@@ -375,8 -362,6 +362,6 @@@
  	default:
  		return -EOPNOTSUPP;
  	}
- 
- 	return 0;
  }
  EXPORT_SYMBOL(phy_mii_ioctl);
  
@@@ -399,7 -384,6 +384,6 @@@ int phy_start_aneg(struct phy_device *p
  		phy_sanitize_settings(phydev);
  
  	err = phydev->drv->config_aneg(phydev);
- 
  	if (err < 0)
  		goto out_unlock;
  
@@@ -419,25 -403,18 +403,18 @@@ out_unlock
  }
  EXPORT_SYMBOL(phy_start_aneg);
  
  /**
   * phy_start_machine - start PHY state machine tracking
   * @phydev: the phy_device struct
-  * @handler: callback function for state change notifications
   *
   * Description: The PHY infrastructure can run a state machine
   *   which tracks whether the PHY is starting up, negotiating,
   *   etc.  This function starts the timer which tracks the state
-  *   of the PHY.  If you want to be notified when the state changes,
-  *   pass in the callback @handler, otherwise, pass NULL.  If you
-  *   want to maintain your own state machine, do not call this
-  *   function.
+  *   of the PHY.  If you want to maintain your own state machine,
+  *   do not call this function.
   */
- void phy_start_machine(struct phy_device *phydev,
- 		void (*handler)(struct net_device *))
+ void phy_start_machine(struct phy_device *phydev)
  {
- 	phydev->adjust_state = handler;
- 
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
  }
  
@@@ -457,8 -434,6 +434,6 @@@ void phy_stop_machine(struct phy_devic
  	if (phydev->state > PHY_UP)
  		phydev->state = PHY_UP;
  	mutex_unlock(&phydev->lock);
- 
- 	phydev->adjust_state = NULL;
  }
  
  /**
@@@ -495,7 -470,8 +470,8 @@@ static irqreturn_t phy_interrupt(int ir
  	/* The MDIO bus is not allowed to be written in interrupt
  	 * context, so we need to disable the irq here.  A work
  	 * queue will write the PHY to disable and clear the
- 	 * interrupt, and then reenable the irq line. */
+ 	 * interrupt, and then reenable the irq line.
+ 	 */
  	disable_irq_nosync(irq);
  	atomic_inc(&phydev->irq_disable);
  
@@@ -510,16 -486,12 +486,12 @@@
   */
  static int phy_enable_interrupts(struct phy_device *phydev)
  {
- 	int err;
- 
- 	err = phy_clear_interrupt(phydev);
+ 	int err = phy_clear_interrupt(phydev);
  
  	if (err < 0)
  		return err;
  
- 	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
- 
- 	return err;
+ 	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
  }
  
  /**
@@@ -532,13 -504,11 +504,11 @@@ static int phy_disable_interrupts(struc
  
  	/* Disable PHY interrupts */
  	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- 
  	if (err)
  		goto phy_err;
  
  	/* Clear the interrupt */
  	err = phy_clear_interrupt(phydev);
- 
  	if (err)
  		goto phy_err;
  
@@@ -562,20 -532,18 +532,16 @@@ phy_err
   */
  int phy_start_interrupts(struct phy_device *phydev)
  {
- 	int err = 0;
- 
  	atomic_set(&phydev->irq_disable, 0);
 -	if (request_irq(phydev->irq, phy_interrupt,
 -				IRQF_SHARED,
 -				"phy_interrupt",
 -				phydev) < 0) {
 +	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
 +			phydev) < 0) {
  		pr_warn("%s: Can't get IRQ %d (PHY)\n",
  			phydev->bus->name, phydev->irq);
  		phydev->irq = PHY_POLL;
  		return 0;
  	}
  
- 	err = phy_enable_interrupts(phydev);
- 
- 	return err;
+ 	return phy_enable_interrupts(phydev);
  }
  EXPORT_SYMBOL(phy_start_interrupts);
  
@@@ -585,24 -553,20 +551,20 @@@
   */
  int phy_stop_interrupts(struct phy_device *phydev)
  {
- 	int err;
- 
- 	err = phy_disable_interrupts(phydev);
+ 	int err = phy_disable_interrupts(phydev);
  
  	if (err)
  		phy_error(phydev);
  
  	free_irq(phydev->irq, phydev);
  
- 	/*
- 	 * Cannot call flush_scheduled_work() here as desired because
+ 	/* Cannot call flush_scheduled_work() here as desired because
  	 * of rtnl_lock(), but we do not really care about what would
  	 * be done, except from enable_irq(), so cancel any work
  	 * possibly pending and take care of the matter below.
  	 */
  	cancel_work_sync(&phydev->phy_queue);
- 	/*
- 	 * If work indeed has been cancelled, disable_irq() will have
+ 	/* If work indeed has been cancelled, disable_irq() will have
  	 * been left unbalanced from phy_interrupt() and enable_irq()
  	 * has to be called so that other devices on the line work.
  	 */
@@@ -613,14 -577,12 +575,12 @@@
  }
  EXPORT_SYMBOL(phy_stop_interrupts);
  
- 
  /**
   * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
   * @work: work_struct that describes the work to be done
   */
  void phy_change(struct work_struct *work)
  {
- 	int err;
  	struct phy_device *phydev =
  		container_of(work, struct phy_device, phy_queue);
  
@@@ -628,9 -590,7 +588,7 @@@
  	    !phydev->drv->did_interrupt(phydev))
  		goto ignore;
  
- 	err = phy_disable_interrupts(phydev);
- 
- 	if (err)
+ 	if (phy_disable_interrupts(phydev))
  		goto phy_err;
  
  	mutex_lock(&phydev->lock);
@@@ -642,16 -602,13 +600,13 @@@
  	enable_irq(phydev->irq);
  
  	/* Reenable interrupts */
- 	if (PHY_HALTED != phydev->state)
- 		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
- 
- 	if (err)
+ 	if (PHY_HALTED != phydev->state &&
+ 	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
  		goto irq_enable_err;
  
  	/* reschedule state queue work to run as soon as possible */
  	cancel_delayed_work_sync(&phydev->state_queue);
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
- 
  	return;
  
  ignore:
@@@ -690,13 -647,12 +645,12 @@@ void phy_stop(struct phy_device *phydev
  out_unlock:
  	mutex_unlock(&phydev->lock);
  
- 	/*
- 	 * Cannot call flush_scheduled_work() here as desired because
+ 	/* Cannot call flush_scheduled_work() here as desired because
  	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
  	 * will not reenable interrupts.
  	 */
  }
- 
+ EXPORT_SYMBOL(phy_stop);
  
  /**
   * phy_start - start or restart a PHY device
@@@ -713,20 -669,19 +667,19 @@@ void phy_start(struct phy_device *phyde
  	mutex_lock(&phydev->lock);
  
  	switch (phydev->state) {
- 		case PHY_STARTING:
- 			phydev->state = PHY_PENDING;
- 			break;
- 		case PHY_READY:
- 			phydev->state = PHY_UP;
- 			break;
- 		case PHY_HALTED:
- 			phydev->state = PHY_RESUMING;
- 		default:
- 			break;
+ 	case PHY_STARTING:
+ 		phydev->state = PHY_PENDING;
+ 		break;
+ 	case PHY_READY:
+ 		phydev->state = PHY_UP;
+ 		break;
+ 	case PHY_HALTED:
+ 		phydev->state = PHY_RESUMING;
+ 	default:
+ 		break;
  	}
  	mutex_unlock(&phydev->lock);
  }
- EXPORT_SYMBOL(phy_stop);
  EXPORT_SYMBOL(phy_start);
  
  /**
@@@ -738,160 -693,132 +691,132 @@@ void phy_state_machine(struct work_stru
  	struct delayed_work *dwork = to_delayed_work(work);
  	struct phy_device *phydev =
  			container_of(dwork, struct phy_device, state_queue);
- 	int needs_aneg = 0;
+ 	int needs_aneg = 0, do_suspend = 0;
  	int err = 0;
  
  	mutex_lock(&phydev->lock);
  
- 	if (phydev->adjust_state)
- 		phydev->adjust_state(phydev->attached_dev);
+ 	switch (phydev->state) {
+ 	case PHY_DOWN:
+ 	case PHY_STARTING:
+ 	case PHY_READY:
+ 	case PHY_PENDING:
+ 		break;
+ 	case PHY_UP:
+ 		needs_aneg = 1;
  
- 	switch(phydev->state) {
- 		case PHY_DOWN:
- 		case PHY_STARTING:
- 		case PHY_READY:
- 		case PHY_PENDING:
- 			break;
- 		case PHY_UP:
- 			needs_aneg = 1;
+ 		phydev->link_timeout = PHY_AN_TIMEOUT;
  
- 			phydev->link_timeout = PHY_AN_TIMEOUT;
+ 		break;
+ 	case PHY_AN:
+ 		err = phy_read_status(phydev);
+ 		if (err < 0)
+ 			break;
  
+ 		/* If the link is down, give up on negotiation for now */
+ 		if (!phydev->link) {
+ 			phydev->state = PHY_NOLINK;
+ 			netif_carrier_off(phydev->attached_dev);
+ 			phydev->adjust_link(phydev->attached_dev);
  			break;
- 		case PHY_AN:
- 			err = phy_read_status(phydev);
+ 		}
  
- 			if (err < 0)
- 				break;
+ 		/* Check if negotiation is done.  Break if there's an error */
+ 		err = phy_aneg_done(phydev);
+ 		if (err < 0)
+ 			break;
  
- 			/* If the link is down, give up on
- 			 * negotiation for now */
- 			if (!phydev->link) {
- 				phydev->state = PHY_NOLINK;
- 				netif_carrier_off(phydev->attached_dev);
- 				phydev->adjust_link(phydev->attached_dev);
- 				break;
- 			}
+ 		/* If AN is done, we're running */
+ 		if (err > 0) {
+ 			phydev->state = PHY_RUNNING;
+ 			netif_carrier_on(phydev->attached_dev);
+ 			phydev->adjust_link(phydev->attached_dev);
  
- 			/* Check if negotiation is done.  Break
- 			 * if there's an error */
- 			err = phy_aneg_done(phydev);
- 			if (err < 0)
+ 		} else if (0 == phydev->link_timeout--) {
+ 			needs_aneg = 1;
+ 			/* If we have the magic_aneg bit, we try again */
+ 			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
  				break;
- 
- 			/* If AN is done, we're running */
- 			if (err > 0) {
- 				phydev->state = PHY_RUNNING;
- 				netif_carrier_on(phydev->attached_dev);
- 				phydev->adjust_link(phydev->attached_dev);
- 
- 			} else if (0 == phydev->link_timeout--) {
- 				needs_aneg = 1;
- 				/* If we have the magic_aneg bit,
- 				 * we try again */
- 				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
- 					break;
- 			}
+ 		}
+ 		break;
+ 	case PHY_NOLINK:
+ 		err = phy_read_status(phydev);
+ 		if (err)
  			break;
- 		case PHY_NOLINK:
- 			err = phy_read_status(phydev);
- 
- 			if (err)
- 				break;
  
- 			if (phydev->link) {
- 				phydev->state = PHY_RUNNING;
- 				netif_carrier_on(phydev->attached_dev);
- 				phydev->adjust_link(phydev->attached_dev);
- 			}
+ 		if (phydev->link) {
+ 			phydev->state = PHY_RUNNING;
+ 			netif_carrier_on(phydev->attached_dev);
+ 			phydev->adjust_link(phydev->attached_dev);
+ 		}
+ 		break;
+ 	case PHY_FORCING:
+ 		err = genphy_update_link(phydev);
+ 		if (err)
  			break;
- 		case PHY_FORCING:
- 			err = genphy_update_link(phydev);
- 
- 			if (err)
- 				break;
  
- 			if (phydev->link) {
- 				phydev->state = PHY_RUNNING;
- 				netif_carrier_on(phydev->attached_dev);
- 			} else {
- 				if (0 == phydev->link_timeout--)
- 					needs_aneg = 1;
- 			}
+ 		if (phydev->link) {
+ 			phydev->state = PHY_RUNNING;
+ 			netif_carrier_on(phydev->attached_dev);
+ 		} else {
+ 			if (0 == phydev->link_timeout--)
+ 				needs_aneg = 1;
+ 		}
  
- 			phydev->adjust_link(phydev->attached_dev);
- 			break;
- 		case PHY_RUNNING:
- 			/* Only register a CHANGE if we are
- 			 * polling or ignoring interrupts
- 			 */
- 			if (!phy_interrupt_is_valid(phydev))
- 				phydev->state = PHY_CHANGELINK;
+ 		phydev->adjust_link(phydev->attached_dev);
+ 		break;
+ 	case PHY_RUNNING:
+ 		/* Only register a CHANGE if we are
+ 		 * polling or ignoring interrupts
+ 		 */
+ 		if (!phy_interrupt_is_valid(phydev))
+ 			phydev->state = PHY_CHANGELINK;
+ 		break;
+ 	case PHY_CHANGELINK:
+ 		err = phy_read_status(phydev);
+ 		if (err)
  			break;
- 		case PHY_CHANGELINK:
- 			err = phy_read_status(phydev);
  
- 			if (err)
- 				break;
+ 		if (phydev->link) {
+ 			phydev->state = PHY_RUNNING;
+ 			netif_carrier_on(phydev->attached_dev);
+ 		} else {
+ 			phydev->state = PHY_NOLINK;
+ 			netif_carrier_off(phydev->attached_dev);
+ 		}
  
- 			if (phydev->link) {
- 				phydev->state = PHY_RUNNING;
- 				netif_carrier_on(phydev->attached_dev);
- 			} else {
- 				phydev->state = PHY_NOLINK;
- 				netif_carrier_off(phydev->attached_dev);
- 			}
+ 		phydev->adjust_link(phydev->attached_dev);
  
+ 		if (phy_interrupt_is_valid(phydev))
+ 			err = phy_config_interrupt(phydev,
+ 						   PHY_INTERRUPT_ENABLED);
+ 		break;
+ 	case PHY_HALTED:
+ 		if (phydev->link) {
+ 			phydev->link = 0;
+ 			netif_carrier_off(phydev->attached_dev);
  			phydev->adjust_link(phydev->attached_dev);
- 
- 			if (phy_interrupt_is_valid(phydev))
- 				err = phy_config_interrupt(phydev,
- 						PHY_INTERRUPT_ENABLED);
- 			break;
- 		case PHY_HALTED:
- 			if (phydev->link) {
- 				phydev->link = 0;
- 				netif_carrier_off(phydev->attached_dev);
- 				phydev->adjust_link(phydev->attached_dev);
- 			}
+ 			do_suspend = 1;
+ 		}
+ 		break;
+ 	case PHY_RESUMING:
+ 		err = phy_clear_interrupt(phydev);
+ 		if (err)
  			break;
- 		case PHY_RESUMING:
- 
- 			err = phy_clear_interrupt(phydev);
  
- 			if (err)
- 				break;
- 
- 			err = phy_config_interrupt(phydev,
- 					PHY_INTERRUPT_ENABLED);
+ 		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ 		if (err)
+ 			break;
  
- 			if (err)
+ 		if (AUTONEG_ENABLE == phydev->autoneg) {
+ 			err = phy_aneg_done(phydev);
+ 			if (err < 0)
  				break;
  
- 			if (AUTONEG_ENABLE == phydev->autoneg) {
- 				err = phy_aneg_done(phydev);
- 				if (err < 0)
- 					break;
- 
- 				/* err > 0 if AN is done.
- 				 * Otherwise, it's 0, and we're
- 				 * still waiting for AN */
- 				if (err > 0) {
- 					err = phy_read_status(phydev);
- 					if (err)
- 						break;
- 
- 					if (phydev->link) {
- 						phydev->state = PHY_RUNNING;
- 						netif_carrier_on(phydev->attached_dev);
- 					} else
- 						phydev->state = PHY_NOLINK;
- 					phydev->adjust_link(phydev->attached_dev);
- 				} else {
- 					phydev->state = PHY_AN;
- 					phydev->link_timeout = PHY_AN_TIMEOUT;
- 				}
- 			} else {
+ 			/* err > 0 if AN is done.
+ 			 * Otherwise, it's 0, and we're  still waiting for AN
+ 			 */
+ 			if (err > 0) {
  				err = phy_read_status(phydev);
  				if (err)
  					break;
@@@ -899,11 -826,28 +824,28 @@@
  				if (phydev->link) {
  					phydev->state = PHY_RUNNING;
  					netif_carrier_on(phydev->attached_dev);
- 				} else
+ 				} else	{
  					phydev->state = PHY_NOLINK;
+ 				}
  				phydev->adjust_link(phydev->attached_dev);
+ 			} else {
+ 				phydev->state = PHY_AN;
+ 				phydev->link_timeout = PHY_AN_TIMEOUT;
  			}
- 			break;
+ 		} else {
+ 			err = phy_read_status(phydev);
+ 			if (err)
+ 				break;
+ 
+ 			if (phydev->link) {
+ 				phydev->state = PHY_RUNNING;
+ 				netif_carrier_on(phydev->attached_dev);
+ 			} else	{
+ 				phydev->state = PHY_NOLINK;
+ 			}
+ 			phydev->adjust_link(phydev->attached_dev);
+ 		}
+ 		break;
  	}
  
  	mutex_unlock(&phydev->lock);
@@@ -911,11 -855,14 +853,14 @@@
  	if (needs_aneg)
  		err = phy_start_aneg(phydev);
  
+ 	if (do_suspend)
+ 		phy_suspend(phydev);
+ 
  	if (err < 0)
  		phy_error(phydev);
  
  	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- 			PHY_STATE_TIME * HZ);
+ 			   PHY_STATE_TIME * HZ);
  }
  
  void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@@ -957,14 -904,10 +902,10 @@@ static inline void mmd_phy_indirect(str
  static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
  				 int addr)
  {
  	mmd_phy_indirect(bus, prtad, devad, addr);
  
  	/* Read the content of the MMD's selected register */
- 	ret = bus->read(bus, addr, MII_MMD_DATA);
- 
- 	return ret;
+ 	return bus->read(bus, addr, MII_MMD_DATA);
  }
  
  /**
@@@ -1004,8 -947,6 +945,6 @@@ static void phy_write_mmd_indirect(stru
   */
  int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
  {
- 	int ret = -EPROTONOSUPPORT;
- 
  	/* According to 802.3az,the EEE is supported only in full duplex-mode.
  	 * Also EEE feature is active when core is operating with MII, GMII
  	 * or RGMII.
@@@ -1031,7 -972,7 +970,7 @@@
  
  		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
  		if (!cap)
- 			goto eee_exit;
+ 			return -EPROTONOSUPPORT;
  
  		/* Check which link settings negotiated and verify it in
  		 * the EEE advertising registers.
@@@ -1050,7 -991,7 +989,7 @@@
  		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
  		idx = phy_find_setting(phydev->speed, phydev->duplex);
  		if (!(lp & adv & settings[idx].setting))
- 			goto eee_exit;
+ 			return -EPROTONOSUPPORT;
  
  		if (clk_stop_enable) {
  			/* Configure the PHY to stop receiving xMII
@@@ -1067,11 -1008,10 +1006,10 @@@
  					       MDIO_MMD_PCS, phydev->addr, val);
  		}
  
- 		ret = 0; /* EEE supported */
+ 		return 0; /* EEE supported */
  	}
  
- eee_exit:
- 	return ret;
+ 	return -EPROTONOSUPPORT;
  }
  EXPORT_SYMBOL(phy_init_eee);
  
@@@ -1086,7 -1026,6 +1024,6 @@@ int phy_get_eee_err(struct phy_device *
  {
  	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
  				     MDIO_MMD_PCS, phydev->addr);
- 
  }
  EXPORT_SYMBOL(phy_get_eee_err);
  
@@@ -1136,9 -1075,8 +1073,8 @@@ EXPORT_SYMBOL(phy_ethtool_get_eee)
   */
  int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
  {
- 	int val;
+ 	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
  
- 	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
  	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
  			       phydev->addr, val);
  
diff --combined drivers/net/usb/mcs7830.c
index f546378,aea68bc..36ff001
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@@ -36,8 -36,7 +36,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include <linux/crc32.h>
@@@ -117,6 -116,7 +116,6 @@@ enum 
  struct mcs7830_data {
  	u8 multi_filter[8];
  	u8 config;
 -	u8 link_counter;
  };
  
  static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@@ -560,16 -560,26 +559,16 @@@ static void mcs7830_status(struct usbne
  {
  	u8 *buf = urb->transfer_buffer;
  	bool link, link_changed;
 -	struct mcs7830_data *data = mcs7830_get_data(dev);
  
  	if (urb->actual_length < 16)
  		return;
  
 -	link = !(buf[1] & 0x20);
 +	link = !(buf[1] == 0x20);
  	link_changed = netif_carrier_ok(dev->net) != link;
  	if (link_changed) {
 -		data->link_counter++;
 -		/*
 -		   track link state 20 times to guard against erroneous
 -		   link state changes reported sometimes by the chip
 -		 */
 -		if (data->link_counter > 20) {
 -			data->link_counter = 0;
 -			usbnet_link_change(dev, link, 0);
 -			netdev_dbg(dev->net, "Link Status is: %d\n", link);
 -		}
 -	} else
 -		data->link_counter = 0;
 +		usbnet_link_change(dev, link, 0);
 +		netdev_dbg(dev->net, "Link Status is: %d\n", link);
 +	}
  }
  
  static const struct driver_info moschip_info = {
diff --combined drivers/net/virtio_net.c
index 5d77644,c51a988..7b17240
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@@ -13,8 -13,7 +13,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  //#define DEBUG
  #include <linux/netdevice.h>
@@@ -874,16 -873,15 +873,15 @@@ static netdev_tx_t start_xmit(struct sk
  /*
   * Send command via the control virtqueue and check status.  Commands
   * supported by the hypervisor, as indicated by feature bits, should
-  * never fail unless improperly formated.
+  * never fail unless improperly formatted.
   */
  static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- 				 struct scatterlist *out,
- 				 struct scatterlist *in)
+ 				 struct scatterlist *out)
  {
  	struct scatterlist *sgs[4], hdr, stat;
  	struct virtio_net_ctrl_hdr ctrl;
  	virtio_net_ctrl_ack status = ~0;
- 	unsigned out_num = 0, in_num = 0, tmp;
+ 	unsigned out_num = 0, tmp;
  
  	/* Caller should know better */
  	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@@ -896,16 -894,13 +894,13 @@@
  
  	if (out)
  		sgs[out_num++] = out;
  
  	/* Add return status. */
  	sg_init_one(&stat, &status, sizeof(status));
- 	sgs[out_num + in_num++] = &stat;
+ 	sgs[out_num] = &stat;
  
- 	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
- 	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
- 	       < 0);
+ 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+ 	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
  
  	if (unlikely(!virtqueue_kick(vi->cvq)))
  		return status == VIRTIO_NET_OK;
@@@ -935,8 -930,7 +930,7 @@@ static int virtnet_set_mac_address(stru
  	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
  		sg_init_one(&sg, addr->sa_data, dev->addr_len);
  		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- 					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
- 					  &sg, NULL)) {
+ 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
  			dev_warn(&vdev->dev,
  				 "Failed to set mac address by vq command.\n");
  			return -EINVAL;
@@@ -1009,7 -1003,7 +1003,7 @@@ static void virtnet_ack_link_announce(s
  {
  	rtnl_lock();
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
- 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+ 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
  		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
  	rtnl_unlock();
  }
@@@ -1027,7 -1021,7 +1021,7 @@@ static int virtnet_set_queues(struct vi
  	sg_init_one(&sg, &s, sizeof(s));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
- 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+ 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
  		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
  			 queue_pairs);
  		return -EINVAL;
@@@ -1067,7 -1061,7 +1061,7 @@@ static void virtnet_set_rx_mode(struct 
  	void *buf;
  	int i;
  
- 	/* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+ 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
  	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
  		return;
  
@@@ -1077,16 -1071,14 +1071,14 @@@
  	sg_init_one(sg, &promisc, sizeof(promisc));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- 				  VIRTIO_NET_CTRL_RX_PROMISC,
- 				  sg, NULL))
+ 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
  		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
  			 promisc ? "en" : "dis");
  
  	sg_init_one(sg, &allmulti, sizeof(allmulti));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- 				  VIRTIO_NET_CTRL_RX_ALLMULTI,
- 				  sg, NULL))
+ 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
  		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
  			 allmulti ? "en" : "dis");
  
@@@ -1122,8 -1114,7 +1114,7 @@@
  		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
- 				  sg, NULL))
+ 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
  		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
  
  	kfree(buf);
@@@ -1138,7 -1129,7 +1129,7 @@@ static int virtnet_vlan_rx_add_vid(stru
  	sg_init_one(&sg, &vid, sizeof(vid));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+ 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
  		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
  	return 0;
  }
@@@ -1152,7 -1143,7 +1143,7 @@@ static int virtnet_vlan_rx_kill_vid(str
  	sg_init_one(&sg, &vid, sizeof(vid));
  
  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+ 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
  		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
  	return 0;
  }
@@@ -1797,17 -1788,16 +1788,17 @@@ static int virtnet_restore(struct virti
  	if (err)
  		return err;
  
 -	if (netif_running(vi->dev))
 +	if (netif_running(vi->dev)) {
 +		for (i = 0; i < vi->curr_queue_pairs; i++)
 +			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 +				schedule_delayed_work(&vi->refill, 0);
 +
  		for (i = 0; i < vi->max_queue_pairs; i++)
  			virtnet_napi_enable(&vi->rq[i]);
 +	}
  
  	netif_device_attach(vi->dev);
  
 -	for (i = 0; i < vi->curr_queue_pairs; i++)
 -		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 -			schedule_delayed_work(&vi->refill, 0);
 -
  	mutex_lock(&vi->config_lock);
  	vi->config_enable = true;
  	mutex_unlock(&vi->config_lock);
diff --combined drivers/net/vxlan.c
index ed384fe,474a99e..599c4a5
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@@ -916,17 -916,32 +916,32 @@@ static bool vxlan_snoop(struct net_devi
  }
  
  /* See if multicast group is already in use by other ID */
- static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
+ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
  {
  	struct vxlan_dev *vxlan;
  
+ 	/* The vxlan_sock is only used by dev, leaving group has
+ 	 * no effect on other vxlan devices.
+ 	 */
+ 	if (atomic_read(&dev->vn_sock->refcnt) == 1)
+ 		return false;
+ 
  	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- 		if (!netif_running(vxlan->dev))
+ 		if (!netif_running(vxlan->dev) || vxlan == dev)
  			continue;
  
- 		if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- 				     remote_ip))
- 			return true;
+ 		if (vxlan->vn_sock != dev->vn_sock)
+ 			continue;
+ 
+ 		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ 				      &dev->default_dst.remote_ip))
+ 			continue;
+ 
+ 		if (vxlan->default_dst.remote_ifindex !=
+ 		    dev->default_dst.remote_ifindex)
+ 			continue;
+ 
+ 		return true;
  	}
  
  	return false;
@@@ -1066,7 -1081,7 +1081,7 @@@ static void vxlan_rcv(struct vxlan_soc
  	struct iphdr *oip = NULL;
  	struct ipv6hdr *oip6 = NULL;
  	struct vxlan_dev *vxlan;
- 	struct pcpu_tstats *stats;
+ 	struct pcpu_sw_netstats *stats;
  	union vxlan_addr saddr;
  	__u32 vni;
  	int err = 0;
@@@ -1390,7 -1405,7 +1405,7 @@@ __be16 vxlan_src_port(__u16 port_min, _
  	unsigned int range = (port_max - port_min) + 1;
  	u32 hash;
  
- 	hash = skb_get_rxhash(skb);
+ 	hash = skb_get_hash(skb);
  	if (!hash)
  		hash = jhash(skb->data, 2 * ETH_ALEN,
  			     (__force u32) skb->protocol);
@@@ -1572,11 -1587,12 +1587,12 @@@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb)
  static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
  			       struct vxlan_dev *dst_vxlan)
  {
- 	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- 	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ 	struct pcpu_sw_netstats *tx_stats, *rx_stats;
  	union vxlan_addr loopback;
  	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
  
+ 	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ 	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
  	skb->pkt_type = PACKET_HOST;
  	skb->encapsulation = 0;
  	skb->dev = dst_vxlan->dev;
@@@ -1882,12 -1898,12 +1898,12 @@@ static int vxlan_init(struct net_devic
  	struct vxlan_sock *vs;
  	int i;
  
- 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+ 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
- 		struct pcpu_tstats *vxlan_stats;
+ 		struct pcpu_sw_netstats *vxlan_stats;
  		vxlan_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&vxlan_stats->syncp);
  	}
@@@ -1935,7 -1951,6 +1951,6 @@@ static void vxlan_uninit(struct net_dev
  /* Start ageing timer and join group when device is brought up */
  static int vxlan_open(struct net_device *dev)
  {
- 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
  	struct vxlan_dev *vxlan = netdev_priv(dev);
  	struct vxlan_sock *vs = vxlan->vn_sock;
  
@@@ -1943,8 -1958,7 +1958,7 @@@
  	if (!vs)
  		return -ENOTCONN;
  
- 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- 	    vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
  		vxlan_sock_hold(vs);
  		dev_hold(dev);
  		queue_work(vxlan_wq, &vxlan->igmp_join);
@@@ -1983,7 -1997,7 +1997,7 @@@ static int vxlan_stop(struct net_devic
  	struct vxlan_sock *vs = vxlan->vn_sock;
  
  	if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- 	    ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ 	    !vxlan_group_used(vn, vxlan)) {
  		vxlan_sock_hold(vs);
  		dev_hold(dev);
  		queue_work(vxlan_wq, &vxlan->igmp_leave);
@@@ -2001,6 -2015,29 +2015,29 @@@ static void vxlan_set_multicast_list(st
  {
  }
  
+ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	struct vxlan_rdst *dst = &vxlan->default_dst;
+ 	struct net_device *lowerdev;
+ 	int max_mtu;
+ 
+ 	lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ 	if (lowerdev == NULL)
+ 		return eth_change_mtu(dev, new_mtu);
+ 
+ 	if (dst->remote_ip.sa.sa_family == AF_INET6)
+ 		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+ 	else
+ 		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+ 
+ 	if (new_mtu < 68 || new_mtu > max_mtu)
+ 		return -EINVAL;
+ 
+ 	dev->mtu = new_mtu;
+ 	return 0;
+ }
+ 
  static const struct net_device_ops vxlan_netdev_ops = {
  	.ndo_init		= vxlan_init,
  	.ndo_uninit		= vxlan_uninit,
@@@ -2009,7 -2046,7 +2046,7 @@@
  	.ndo_start_xmit		= vxlan_xmit,
  	.ndo_get_stats64	= ip_tunnel_get_stats64,
  	.ndo_set_rx_mode	= vxlan_set_multicast_list,
- 	.ndo_change_mtu		= eth_change_mtu,
+ 	.ndo_change_mtu		= vxlan_change_mtu,
  	.ndo_validate_addr	= eth_validate_addr,
  	.ndo_set_mac_address	= eth_mac_addr,
  	.ndo_fdb_add		= vxlan_fdb_add,
@@@ -2440,8 -2477,7 +2477,8 @@@ static int vxlan_newlink(struct net *ne
  		/* update header length based on lower device */
  		dev->hard_header_len = lowerdev->hard_header_len +
  				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 -	}
 +	} else if (use_ipv6)
 +		vxlan->flags |= VXLAN_F_IPV6;
  
  	if (data[IFLA_VXLAN_TOS])
  		vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --combined drivers/net/wireless/ath/ath9k/ar9002_mac.c
index a366d6b,857ede3..741b38d
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@@ -29,7 -29,8 +29,8 @@@ static void ar9002_hw_set_desc_link(voi
  	((struct ath_desc*) ds)->ds_link = ds_link;
  }
  
- static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ 			      u32 *sync_cause_p)
  {
  	u32 isr = 0;
  	u32 mask2 = 0;
@@@ -76,16 -77,9 +77,16 @@@
  				mask2 |= ATH9K_INT_CST;
  			if (isr2 & AR_ISR_S2_TSFOOR)
  				mask2 |= ATH9K_INT_TSFOOR;
 +
 +			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
 +				REG_WRITE(ah, AR_ISR_S2, isr2);
 +				isr &= ~AR_ISR_BCNMISC;
 +			}
  		}
  
 -		isr = REG_READ(ah, AR_ISR_RAC);
 +		if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
 +			isr = REG_READ(ah, AR_ISR_RAC);
 +
  		if (isr == 0xffffffff) {
  			*masked = 0;
  			return false;
@@@ -104,23 -98,11 +105,23 @@@
  
  			*masked |= ATH9K_INT_TX;
  
 -			s0_s = REG_READ(ah, AR_ISR_S0_S);
 +			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
 +				s0_s = REG_READ(ah, AR_ISR_S0_S);
 +				s1_s = REG_READ(ah, AR_ISR_S1_S);
 +			} else {
 +				s0_s = REG_READ(ah, AR_ISR_S0);
 +				REG_WRITE(ah, AR_ISR_S0, s0_s);
 +				s1_s = REG_READ(ah, AR_ISR_S1);
 +				REG_WRITE(ah, AR_ISR_S1, s1_s);
 +
 +				isr &= ~(AR_ISR_TXOK |
 +					 AR_ISR_TXDESC |
 +					 AR_ISR_TXERR |
 +					 AR_ISR_TXEOL);
 +			}
 +
  			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
  			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
 -
 -			s1_s = REG_READ(ah, AR_ISR_S1_S);
  			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
  			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
  		}
@@@ -133,15 -115,13 +134,15 @@@
  		*masked |= mask2;
  	}
  
 -	if (AR_SREV_9100(ah))
 -		return true;
 -
 -	if (isr & AR_ISR_GENTMR) {
 +	if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
  		u32 s5_s;
  
 -		s5_s = REG_READ(ah, AR_ISR_S5_S);
 +		if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
 +			s5_s = REG_READ(ah, AR_ISR_S5_S);
 +		} else {
 +			s5_s = REG_READ(ah, AR_ISR_S5);
 +		}
 +
  		ah->intr_gen_timer_trigger =
  				MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
  
@@@ -154,23 -134,11 +155,24 @@@
  		if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
  		    !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  			*masked |= ATH9K_INT_TIM_TIMER;
 +
 +		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
 +			REG_WRITE(ah, AR_ISR_S5, s5_s);
 +			isr &= ~AR_ISR_GENTMR;
 +		}
  	}
  
 +	if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
 +		REG_WRITE(ah, AR_ISR, isr);
 +		REG_READ(ah, AR_ISR);
 +	}
 +
 +	if (AR_SREV_9100(ah))
 +		return true;
 +
  	if (sync_cause) {
- 		ath9k_debug_sync_cause(common, sync_cause);
+ 		if (sync_cause_p)
+ 			*sync_cause_p = sync_cause;
  		fatal_int =
  			(sync_cause &
  			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --combined drivers/net/wireless/ath/ath9k/main.c
index 21aa09e,173a889..21b764b
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@@ -82,6 -82,22 +82,22 @@@ static bool ath9k_setpower(struct ath_s
  	return ret;
  }
  
+ void ath_ps_full_sleep(unsigned long data)
+ {
+ 	struct ath_softc *sc = (struct ath_softc *) data;
+ 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ 	bool reset;
+ 
+ 	spin_lock(&common->cc_lock);
+ 	ath_hw_cycle_counters_update(common);
+ 	spin_unlock(&common->cc_lock);
+ 
+ 	ath9k_hw_setrxabort(sc->sc_ah, 1);
+ 	ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
+ 
+ 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ }
+ 
  void ath9k_ps_wakeup(struct ath_softc *sc)
  {
  	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@@ -92,6 -108,7 +108,7 @@@
  	if (++sc->ps_usecount != 1)
  		goto unlock;
  
+ 	del_timer_sync(&sc->sleep_timer);
  	power_mode = sc->sc_ah->power_mode;
  	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
  
@@@ -117,17 -134,17 +134,17 @@@ void ath9k_ps_restore(struct ath_softc 
  	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  	enum ath9k_power_mode mode;
  	unsigned long flags;
- 	bool reset;
  
  	spin_lock_irqsave(&sc->sc_pm_lock, flags);
  	if (--sc->ps_usecount != 0)
  		goto unlock;
  
  	if (sc->ps_idle) {
- 		ath9k_hw_setrxabort(sc->sc_ah, 1);
- 		ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
- 		mode = ATH9K_PM_FULL_SLEEP;
- 	} else if (sc->ps_enabled &&
+ 		mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
+ 		goto unlock;
+ 	}
+ 
+ 	if (sc->ps_enabled &&
  		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
  				     PS_WAIT_FOR_CAB |
  				     PS_WAIT_FOR_PSPOLL_DATA |
@@@ -163,13 -180,13 +180,13 @@@ static void __ath_cancel_work(struct at
  #endif
  }
  
- static void ath_cancel_work(struct ath_softc *sc)
+ void ath_cancel_work(struct ath_softc *sc)
  {
  	__ath_cancel_work(sc);
  	cancel_work_sync(&sc->hw_reset_work);
  }
  
- static void ath_restart_work(struct ath_softc *sc)
+ void ath_restart_work(struct ath_softc *sc)
  {
  	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
  
@@@ -487,8 -504,13 +504,13 @@@ void ath9k_tasklet(unsigned long data
  			ath_tx_edma_tasklet(sc);
  		else
  			ath_tx_tasklet(sc);
+ 
+ 		wake_up(&sc->tx_wait);
  	}
  
+ 	if (status & ATH9K_INT_GENTIMER)
+ 		ath_gen_timer_isr(sc->sc_ah);
+ 
  	ath9k_btcoex_handle_interrupt(sc, status);
  
  	/* re-enable hardware interrupt */
@@@ -519,6 -541,7 +541,7 @@@ irqreturn_t ath_isr(int irq, void *dev
  	struct ath_hw *ah = sc->sc_ah;
  	struct ath_common *common = ath9k_hw_common(ah);
  	enum ath9k_int status;
+ 	u32 sync_cause;
  	bool sched = false;
  
  	/*
@@@ -545,7 -568,8 +568,8 @@@
  	 * bits we haven't explicitly enabled so we mask the
  	 * value to insure we only process bits we requested.
  	 */
- 	ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
+ 	ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+ 	ath9k_debug_sync_cause(sc, sync_cause);
  	status &= ah->imask;	/* discard unasked-for bits */
  
  	/*
@@@ -579,7 -603,8 +603,8 @@@
  
  		goto chip_reset;
  	}
- #ifdef CONFIG_PM_SLEEP
+ 
+ #ifdef CONFIG_ATH9K_WOW
  	if (status & ATH9K_INT_BMISS) {
  		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
  			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
@@@ -588,6 -613,8 +613,8 @@@
  		}
  	}
  #endif
+ 
+ 
  	if (status & ATH9K_INT_SWBA)
  		tasklet_schedule(&sc->bcon_tasklet);
  
@@@ -627,7 -654,7 +654,7 @@@ chip_reset
  #undef SCHED_INTR
  }
  
- static int ath_reset(struct ath_softc *sc)
+ int ath_reset(struct ath_softc *sc)
  {
  	int r;
  
@@@ -735,6 -762,8 +762,8 @@@ static int ath9k_start(struct ieee80211
  	 */
  	ath9k_cmn_init_crypto(sc->sc_ah);
  
+ 	ath9k_hw_reset_tsf(ah);
+ 
  	spin_unlock_bh(&sc->sc_pcu_lock);
  
  	mutex_unlock(&sc->mutex);
@@@ -965,9 -994,8 +994,9 @@@ void ath9k_calculate_iter_data(struct i
  	struct ath_common *common = ath9k_hw_common(ah);
  
  	/*
 -	 * Use the hardware MAC address as reference, the hardware uses it
 -	 * together with the BSSID mask when matching addresses.
 +	 * Pick the MAC address of the first interface as the new hardware
 +	 * MAC address. The hardware will use it together with the BSSID mask
 +	 * when matching addresses.
  	 */
  	memset(iter_data, 0, sizeof(*iter_data));
  	memset(&iter_data->mask, 0xff, ETH_ALEN);
@@@ -1636,13 -1664,8 +1665,8 @@@ static void ath9k_bss_info_changed(stru
  	}
  
  	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
- 	    (changed & BSS_CHANGED_BEACON_INT)) {
- 		if (ah->opmode == NL80211_IFTYPE_AP &&
- 		    bss_conf->enable_beacon)
- 			ath9k_set_tsfadjust(sc, vif);
- 		if (ath9k_allow_beacon_config(sc, vif))
- 			ath9k_beacon_config(sc, vif, changed);
- 	}
+ 	    (changed & BSS_CHANGED_BEACON_INT))
+ 		ath9k_beacon_config(sc, vif, changed);
  
  	if (changed & BSS_CHANGED_ERP_SLOT) {
  		if (bss_conf->use_short_slot)
@@@ -1818,13 -1841,31 +1842,31 @@@ static void ath9k_set_coverage_class(st
  	mutex_unlock(&sc->mutex);
  }
  
+ static bool ath9k_has_tx_pending(struct ath_softc *sc)
+ {
+ 	int i, npend;
+ 
+ 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ 		if (!ATH_TXQ_SETUP(sc, i))
+ 			continue;
+ 
+ 		if (!sc->tx.txq[i].axq_depth)
+ 			continue;
+ 
+ 		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ 		if (npend)
+ 			break;
+ 	}
+ 
+ 	return !!npend;
+ }
+ 
  static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
  {
  	struct ath_softc *sc = hw->priv;
  	struct ath_hw *ah = sc->sc_ah;
  	struct ath_common *common = ath9k_hw_common(ah);
- 	int timeout = 200; /* ms */
- 	int i, j;
+ 	int timeout = HZ / 5; /* 200 ms */
  	bool drain_txq;
  
  	mutex_lock(&sc->mutex);
@@@ -1842,25 -1883,9 +1884,9 @@@
  		return;
  	}
  
- 	for (j = 0; j < timeout; j++) {
- 		bool npend = false;
- 
- 		if (j)
- 			usleep_range(1000, 2000);
- 
- 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- 			if (!ATH_TXQ_SETUP(sc, i))
- 				continue;
- 
- 			npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
- 
- 			if (npend)
- 				break;
- 		}
- 
- 		if (!npend)
- 		    break;
- 	}
+ 	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+ 			       timeout) > 0)
+ 		drop = false;
  
  	if (drop) {
  		ath9k_ps_wakeup(sc);
@@@ -2022,333 -2047,6 +2048,6 @@@ static int ath9k_get_antenna(struct iee
  	return 0;
  }
  
- #ifdef CONFIG_PM_SLEEP
- 
- static void ath9k_wow_map_triggers(struct ath_softc *sc,
- 				   struct cfg80211_wowlan *wowlan,
- 				   u32 *wow_triggers)
- {
- 	if (wowlan->disconnect)
- 		*wow_triggers |= AH_WOW_LINK_CHANGE |
- 				 AH_WOW_BEACON_MISS;
- 	if (wowlan->magic_pkt)
- 		*wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
- 
- 	if (wowlan->n_patterns)
- 		*wow_triggers |= AH_WOW_USER_PATTERN_EN;
- 
- 	sc->wow_enabled = *wow_triggers;
- 
- }
- 
- static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
- {
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath_common *common = ath9k_hw_common(ah);
- 	int pattern_count = 0;
- 	int i, byte_cnt;
- 	u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
- 	u8 dis_deauth_mask[MAX_PATTERN_SIZE];
- 
- 	memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
- 	memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
- 
- 	/*
- 	 * Create Dissassociate / Deauthenticate packet filter
- 	 *
- 	 *     2 bytes        2 byte    6 bytes   6 bytes  6 bytes
- 	 *  +--------------+----------+---------+--------+--------+----
- 	 *  + Frame Control+ Duration +   DA    +  SA    +  BSSID +
- 	 *  +--------------+----------+---------+--------+--------+----
- 	 *
- 	 * The above is the management frame format for disassociate/
- 	 * deauthenticate pattern, from this we need to match the first byte
- 	 * of 'Frame Control' and DA, SA, and BSSID fields
- 	 * (skipping 2nd byte of FC and Duration feild.
- 	 *
- 	 * Disassociate pattern
- 	 * --------------------
- 	 * Frame control = 00 00 1010
- 	 * DA, SA, BSSID = x:x:x:x:x:x
- 	 * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- 	 *			    | x:x:x:x:x:x  -- 22 bytes
- 	 *
- 	 * Deauthenticate pattern
- 	 * ----------------------
- 	 * Frame control = 00 00 1100
- 	 * DA, SA, BSSID = x:x:x:x:x:x
- 	 * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- 	 *			    | x:x:x:x:x:x  -- 22 bytes
- 	 */
- 
- 	/* Create Disassociate Pattern first */
- 
- 	byte_cnt = 0;
- 
- 	/* Fill out the mask with all FF's */
- 
- 	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
- 		dis_deauth_mask[i] = 0xff;
- 
- 	/* copy the first byte of frame control field */
- 	dis_deauth_pattern[byte_cnt] = 0xa0;
- 	byte_cnt++;
- 
- 	/* skip 2nd byte of frame control and Duration field */
- 	byte_cnt += 3;
- 
- 	/*
- 	 * need not match the destination mac address, it can be a broadcast
- 	 * mac address or an unicast to this station
- 	 */
- 	byte_cnt += 6;
- 
- 	/* copy the source mac address */
- 	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
- 
- 	byte_cnt += 6;
- 
- 	/* copy the bssid, its same as the source mac address */
- 
- 	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
- 
- 	/* Create Disassociate pattern mask */
- 
- 	dis_deauth_mask[0] = 0xfe;
- 	dis_deauth_mask[1] = 0x03;
- 	dis_deauth_mask[2] = 0xc0;
- 
- 	ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
- 
- 	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- 				   pattern_count, byte_cnt);
- 
- 	pattern_count++;
- 	/*
- 	 * for de-authenticate pattern, only the first byte of the frame
- 	 * control field gets changed from 0xA0 to 0xC0
- 	 */
- 	dis_deauth_pattern[0] = 0xC0;
- 
- 	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- 				   pattern_count, byte_cnt);
- 
- }
- 
- static void ath9k_wow_add_pattern(struct ath_softc *sc,
- 				  struct cfg80211_wowlan *wowlan)
- {
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath9k_wow_pattern *wow_pattern = NULL;
- 	struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
- 	int mask_len;
- 	s8 i = 0;
- 
- 	if (!wowlan->n_patterns)
- 		return;
- 
- 	/*
- 	 * Add the new user configured patterns
- 	 */
- 	for (i = 0; i < wowlan->n_patterns; i++) {
- 
- 		wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
- 
- 		if (!wow_pattern)
- 			return;
- 
- 		/*
- 		 * TODO: convert the generic user space pattern to
- 		 * appropriate chip specific/802.11 pattern.
- 		 */
- 
- 		mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
- 		memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
- 		memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
- 		memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
- 		       patterns[i].pattern_len);
- 		memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
- 		wow_pattern->pattern_len = patterns[i].pattern_len;
- 
- 		/*
- 		 * just need to take care of deauth and disssoc pattern,
- 		 * make sure we don't overwrite them.
- 		 */
- 
- 		ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
- 					   wow_pattern->mask_bytes,
- 					   i + 2,
- 					   wow_pattern->pattern_len);
- 		kfree(wow_pattern);
- 
- 	}
- 
- }
- 
- static int ath9k_suspend(struct ieee80211_hw *hw,
- 			 struct cfg80211_wowlan *wowlan)
- {
- 	struct ath_softc *sc = hw->priv;
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath_common *common = ath9k_hw_common(ah);
- 	u32 wow_triggers_enabled = 0;
- 	int ret = 0;
- 
- 	mutex_lock(&sc->mutex);
- 
- 	ath_cancel_work(sc);
- 	ath_stop_ani(sc);
- 	del_timer_sync(&sc->rx_poll_timer);
- 
- 	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
- 		ath_dbg(common, ANY, "Device not present\n");
- 		ret = -EINVAL;
- 		goto fail_wow;
- 	}
- 
- 	if (WARN_ON(!wowlan)) {
- 		ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
- 		ret = -EINVAL;
- 		goto fail_wow;
- 	}
- 
- 	if (!device_can_wakeup(sc->dev)) {
- 		ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
- 		ret = 1;
- 		goto fail_wow;
- 	}
- 
- 	/*
- 	 * none of the sta vifs are associated
- 	 * and we are not currently handling multivif
- 	 * cases, for instance we have to seperately
- 	 * configure 'keep alive frame' for each
- 	 * STA.
- 	 */
- 
- 	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- 		ath_dbg(common, WOW, "None of the STA vifs are associated\n");
- 		ret = 1;
- 		goto fail_wow;
- 	}
- 
- 	if (sc->nvifs > 1) {
- 		ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
- 		ret = 1;
- 		goto fail_wow;
- 	}
- 
- 	ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
- 
- 	ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
- 		wow_triggers_enabled);
- 
- 	ath9k_ps_wakeup(sc);
- 
- 	ath9k_stop_btcoex(sc);
- 
- 	/*
- 	 * Enable wake up on recieving disassoc/deauth
- 	 * frame by default.
- 	 */
- 	ath9k_wow_add_disassoc_deauth_pattern(sc);
- 
- 	if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
- 		ath9k_wow_add_pattern(sc, wowlan);
- 
- 	spin_lock_bh(&sc->sc_pcu_lock);
- 	/*
- 	 * To avoid false wake, we enable beacon miss interrupt only
- 	 * when we go to sleep. We save the current interrupt mask
- 	 * so we can restore it after the system wakes up
- 	 */
- 	sc->wow_intr_before_sleep = ah->imask;
- 	ah->imask &= ~ATH9K_INT_GLOBAL;
- 	ath9k_hw_disable_interrupts(ah);
- 	ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
- 	ath9k_hw_set_interrupts(ah);
- 	ath9k_hw_enable_interrupts(ah);
- 
- 	spin_unlock_bh(&sc->sc_pcu_lock);
- 
- 	/*
- 	 * we can now sync irq and kill any running tasklets, since we already
- 	 * disabled interrupts and not holding a spin lock
- 	 */
- 	synchronize_irq(sc->irq);
- 	tasklet_kill(&sc->intr_tq);
- 
- 	ath9k_hw_wow_enable(ah, wow_triggers_enabled);
- 
- 	ath9k_ps_restore(sc);
- 	ath_dbg(common, ANY, "WoW enabled in ath9k\n");
- 	atomic_inc(&sc->wow_sleep_proc_intr);
- 
- fail_wow:
- 	mutex_unlock(&sc->mutex);
- 	return ret;
- }
- 
- static int ath9k_resume(struct ieee80211_hw *hw)
- {
- 	struct ath_softc *sc = hw->priv;
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath_common *common = ath9k_hw_common(ah);
- 	u32 wow_status;
- 
- 	mutex_lock(&sc->mutex);
- 
- 	ath9k_ps_wakeup(sc);
- 
- 	spin_lock_bh(&sc->sc_pcu_lock);
- 
- 	ath9k_hw_disable_interrupts(ah);
- 	ah->imask = sc->wow_intr_before_sleep;
- 	ath9k_hw_set_interrupts(ah);
- 	ath9k_hw_enable_interrupts(ah);
- 
- 	spin_unlock_bh(&sc->sc_pcu_lock);
- 
- 	wow_status = ath9k_hw_wow_wakeup(ah);
- 
- 	if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
- 		/*
- 		 * some devices may not pick beacon miss
- 		 * as the reason they woke up so we add
- 		 * that here for that shortcoming.
- 		 */
- 		wow_status |= AH_WOW_BEACON_MISS;
- 		atomic_dec(&sc->wow_got_bmiss_intr);
- 		ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
- 	}
- 
- 	atomic_dec(&sc->wow_sleep_proc_intr);
- 
- 	if (wow_status) {
- 		ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
- 			ath9k_hw_wow_event_to_string(wow_status), wow_status);
- 	}
- 
- 	ath_restart_work(sc);
- 	ath9k_start_btcoex(sc);
- 
- 	ath9k_ps_restore(sc);
- 	mutex_unlock(&sc->mutex);
- 
- 	return 0;
- }
- 
- static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
- {
- 	struct ath_softc *sc = hw->priv;
- 
- 	mutex_lock(&sc->mutex);
- 	device_init_wakeup(sc->dev, 1);
- 	device_set_wakeup_enable(sc->dev, enabled);
- 	mutex_unlock(&sc->mutex);
- }
- 
- #endif
  static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
  {
  	struct ath_softc *sc = hw->priv;
@@@ -2374,134 -2072,6 +2073,6 @@@ static void ath9k_channel_switch_beacon
  	sc->csa_vif = vif;
  }
  
- static void ath9k_tx99_stop(struct ath_softc *sc)
- {
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath_common *common = ath9k_hw_common(ah);
- 
- 	ath_drain_all_txq(sc);
- 	ath_startrecv(sc);
- 
- 	ath9k_hw_set_interrupts(ah);
- 	ath9k_hw_enable_interrupts(ah);
- 
- 	ieee80211_wake_queues(sc->hw);
- 
- 	kfree_skb(sc->tx99_skb);
- 	sc->tx99_skb = NULL;
- 	sc->tx99_state = false;
- 
- 	ath9k_hw_tx99_stop(sc->sc_ah);
- 	ath_dbg(common, XMIT, "TX99 stopped\n");
- }
- 
- static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
- {
- 	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
- 			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
- 			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
- 			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
- 			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
- 			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
- 			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
- 			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
- 	u32 len = 1200;
- 	struct ieee80211_hw *hw = sc->hw;
- 	struct ieee80211_hdr *hdr;
- 	struct ieee80211_tx_info *tx_info;
- 	struct sk_buff *skb;
- 
- 	skb = alloc_skb(len, GFP_KERNEL);
- 	if (!skb)
- 		return NULL;
- 
- 	skb_put(skb, len);
- 
- 	memset(skb->data, 0, len);
- 
- 	hdr = (struct ieee80211_hdr *)skb->data;
- 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
- 	hdr->duration_id = 0;
- 
- 	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
- 	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
- 	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- 
- 	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
- 
- 	tx_info = IEEE80211_SKB_CB(skb);
- 	memset(tx_info, 0, sizeof(*tx_info));
- 	tx_info->band = hw->conf.chandef.chan->band;
- 	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
- 	tx_info->control.vif = sc->tx99_vif;
- 
- 	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
- 
- 	return skb;
- }
- 
- void ath9k_tx99_deinit(struct ath_softc *sc)
- {
- 	ath_reset(sc);
- 
- 	ath9k_ps_wakeup(sc);
- 	ath9k_tx99_stop(sc);
- 	ath9k_ps_restore(sc);
- }
- 
- int ath9k_tx99_init(struct ath_softc *sc)
- {
- 	struct ieee80211_hw *hw = sc->hw;
- 	struct ath_hw *ah = sc->sc_ah;
- 	struct ath_common *common = ath9k_hw_common(ah);
- 	struct ath_tx_control txctl;
- 	int r;
- 
- 	if (sc->sc_flags & SC_OP_INVALID) {
- 		ath_err(common,
- 			"driver is in invalid state unable to use TX99");
- 		return -EINVAL;
- 	}
- 
- 	sc->tx99_skb = ath9k_build_tx99_skb(sc);
- 	if (!sc->tx99_skb)
- 		return -ENOMEM;
- 
- 	memset(&txctl, 0, sizeof(txctl));
- 	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- 
- 	ath_reset(sc);
- 
- 	ath9k_ps_wakeup(sc);
- 
- 	ath9k_hw_disable_interrupts(ah);
- 	atomic_set(&ah->intr_ref_cnt, -1);
- 	ath_drain_all_txq(sc);
- 	ath_stoprecv(sc);
- 
- 	sc->tx99_state = true;
- 
- 	ieee80211_stop_queues(hw);
- 
- 	if (sc->tx99_power == MAX_RATE_POWER + 1)
- 		sc->tx99_power = MAX_RATE_POWER;
- 
- 	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
- 	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
- 	if (r) {
- 		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
- 		return r;
- 	}
- 
- 	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
- 		sc->tx99_power,
- 		sc->tx99_power / 2);
- 
- 	/* We leave the harware awake as it will be chugging on */
- 
- 	return 0;
- }
- 
  struct ieee80211_ops ath9k_ops = {
  	.tx 		    = ath9k_tx,
  	.start 		    = ath9k_start,
@@@ -2532,7 -2102,7 +2103,7 @@@
  	.set_antenna	    = ath9k_set_antenna,
  	.get_antenna	    = ath9k_get_antenna,
  
- #ifdef CONFIG_PM_SLEEP
+ #ifdef CONFIG_ATH9K_WOW
  	.suspend	    = ath9k_suspend,
  	.resume		    = ath9k_resume,
  	.set_wakeup	    = ath9k_set_wakeup,
diff --combined drivers/net/wireless/rtlwifi/pci.c
index 5a53195,8707d1a..d7aa165
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@@ -688,8 -688,6 +688,6 @@@ static void _rtl_receive_one(struct iee
  		rtlpriv->stats.rxbytesunicast += skb->len;
  	}
  
- 	rtl_is_special_data(hw, skb, false);
- 
  	if (ieee80211_is_data(fc)) {
  		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
  
@@@ -740,8 -738,6 +738,8 @@@ static void _rtl_pci_rx_interrupt(struc
  	};
  	int index = rtlpci->rx_ring[rx_queue_idx].idx;
  
 +	if (rtlpci->driver_is_goingto_unload)
 +		return;
  	/*RX NORMAL PKT */
  	while (count--) {
  		/*rx descriptor */
@@@ -1638,7 -1634,6 +1636,7 @@@ static void rtl_pci_stop(struct ieee802
  	 */
  	set_hal_stop(rtlhal);
  
 +	rtlpci->driver_is_goingto_unload = true;
  	rtlpriv->cfg->ops->disable_interrupt(hw);
  	cancel_work_sync(&rtlpriv->works.lps_change_work);
  
@@@ -1656,6 -1651,7 +1654,6 @@@
  	ppsc->rfchange_inprogress = true;
  	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
  
 -	rtlpci->driver_is_goingto_unload = true;
  	rtlpriv->cfg->ops->hw_disable(hw);
  	/* some things are not needed if firmware not available */
  	if (!rtlpriv->max_fw_size)
diff --combined drivers/net/xen-netback/common.h
index c47794b,ba30a6d..c955fc3
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@@ -101,13 -101,6 +101,13 @@@ struct xenvif_rx_meta 
  
  #define MAX_PENDING_REQS 256
  
 +/* It's possible for an skb to have a maximal number of frags
 + * but still be less than MAX_BUFFER_OFFSET in size. Thus the
 + * worst-case number of copy operations is MAX_SKB_FRAGS per
 + * ring slot.
 + */
 +#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 +
  struct xenvif {
  	/* Unique identifier for this interface. */
  	domid_t          domid;
@@@ -143,20 -136,18 +143,18 @@@
  	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
  	struct xen_netif_rx_back_ring rx;
  	struct sk_buff_head rx_queue;
- 
- 	/* Allow xenvif_start_xmit() to peek ahead in the rx request
- 	 * ring.  This is a prediction of what rx_req_cons will be
- 	 * once all queued skbs are put on the ring.
+ 	/* Set when the RX interrupt is triggered by the frontend.
+ 	 * The worker thread may need to wake the queue.
  	 */
- 	RING_IDX rx_req_cons_peek;
+ 	bool rx_event;
  
 -	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
 -	 * head/fragment page uses 2 copy operations because it
 -	 * straddles two buffers in the frontend.
 -	 */
 -	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
 -	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
 +	/* This array is allocated separately as it is large */
 +	struct gnttab_copy *grant_copy_op;
  
 +	/* We create one meta structure per ring request we consume, so
 +	 * the maximum number is the same as the ring size.
 +	 */
 +	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
  
  	u8               fe_dev_addr[6];
  
@@@ -205,8 -196,6 +203,6 @@@ void xenvif_xenbus_fini(void)
  
  int xenvif_schedulable(struct xenvif *vif);
  
- int xenvif_rx_ring_full(struct xenvif *vif);
- 
  int xenvif_must_stop_queue(struct xenvif *vif);
  
  /* (Un)Map communication rings. */
@@@ -218,21 -207,20 +214,20 @@@ int xenvif_map_frontend_rings(struct xe
  /* Check for SKBs from frontend and schedule backend processing */
  void xenvif_check_rx_xenvif(struct xenvif *vif);
  
  /* Prevent the device from generating any further traffic. */
  void xenvif_carrier_off(struct xenvif *vif);
  
- /* Returns number of ring slots required to send an skb to the frontend */
- unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
- 
  int xenvif_tx_action(struct xenvif *vif, int budget);
- void xenvif_rx_action(struct xenvif *vif);
  
  int xenvif_kthread(void *data);
+ void xenvif_kick_thread(struct xenvif *vif);
+ 
+ /* Determine whether the needed number of slots (req) are available,
+  * and set req_event if not.
+  */
+ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+ 
+ void xenvif_stop_queue(struct xenvif *vif);
  
  extern bool separate_tx_rx_irq;
  
diff --combined drivers/net/xen-netback/interface.c
index 34ca4e5,1dcb960..06c6fee
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@@ -46,11 -46,6 +46,6 @@@ int xenvif_schedulable(struct xenvif *v
  	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
  }
  
- static int xenvif_rx_schedulable(struct xenvif *vif)
- {
- 	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
- }
- 
  static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
  {
  	struct xenvif *vif = dev_id;
@@@ -104,8 -99,8 +99,8 @@@ static irqreturn_t xenvif_rx_interrupt(
  {
  	struct xenvif *vif = dev_id;
  
- 	if (xenvif_rx_schedulable(vif))
- 		netif_wake_queue(vif->dev);
+ 	vif->rx_event = true;
+ 	xenvif_kick_thread(vif);
  
  	return IRQ_HANDLED;
  }
@@@ -121,24 -116,35 +116,35 @@@ static irqreturn_t xenvif_interrupt(in
  static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
  	struct xenvif *vif = netdev_priv(dev);
+ 	int min_slots_needed;
  
  	BUG_ON(skb->dev != dev);
  
  	/* Drop the packet if vif is not ready */
- 	if (vif->task == NULL)
+ 	if (vif->task == NULL || !xenvif_schedulable(vif))
  		goto drop;
  
- 	/* Drop the packet if the target domain has no receive buffers. */
- 	if (!xenvif_rx_schedulable(vif))
- 		goto drop;
+ 	/* At best we'll need one slot for the header and one for each
+ 	 * frag.
+ 	 */
+ 	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
  
- 	/* Reserve ring slots for the worst-case number of fragments. */
- 	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+ 	/* If the skb is GSO then we'll also need an extra slot for the
+ 	 * metadata.
+ 	 */
+ 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ 	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ 		min_slots_needed++;
  
- 	if (vif->can_queue && xenvif_must_stop_queue(vif))
- 		netif_stop_queue(dev);
+ 	/* If the skb can't possibly fit in the remaining slots
+ 	 * then turn off the queue to give the ring a chance to
+ 	 * drain.
+ 	 */
+ 	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ 		xenvif_stop_queue(vif);
  
- 	xenvif_queue_tx_skb(vif, skb);
+ 	skb_queue_tail(&vif->rx_queue, skb);
+ 	xenvif_kick_thread(vif);
  
  	return NETDEV_TX_OK;
  
@@@ -148,12 -154,6 +154,6 @@@
  	return NETDEV_TX_OK;
  }
  
- void xenvif_notify_tx_completion(struct xenvif *vif)
- {
- 	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
- 		netif_wake_queue(vif->dev);
- }
- 
  static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
  {
  	struct xenvif *vif = netdev_priv(dev);
@@@ -307,15 -307,6 +307,15 @@@ struct xenvif *xenvif_alloc(struct devi
  	SET_NETDEV_DEV(dev, parent);
  
  	vif = netdev_priv(dev);
 +
 +	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
 +				     MAX_GRANT_COPY_OPS);
 +	if (vif->grant_copy_op == NULL) {
 +		pr_warn("Could not allocate grant copy space for %s\n", name);
 +		free_netdev(dev);
 +		return ERR_PTR(-ENOMEM);
 +	}
 +
  	vif->domid  = domid;
  	vif->handle = handle;
  	vif->can_sg = 1;
@@@ -387,6 -378,8 +387,8 @@@ int xenvif_connect(struct xenvif *vif, 
  	if (err < 0)
  		goto err;
  
+ 	init_waitqueue_head(&vif->wq);
+ 
  	if (tx_evtchn == rx_evtchn) {
  		/* feature-split-event-channels == 0 */
  		err = bind_interdomain_evtchn_to_irqhandler(
@@@ -419,7 -412,6 +421,6 @@@
  		disable_irq(vif->rx_irq);
  	}
  
- 	init_waitqueue_head(&vif->wq);
  	task = kthread_create(xenvif_kthread,
  			      (void *)vif, "%s", vif->dev->name);
  	if (IS_ERR(task)) {
@@@ -496,7 -488,6 +497,7 @@@ void xenvif_free(struct xenvif *vif
  
  	unregister_netdev(vif->dev);
  
 +	vfree(vif->grant_copy_op);
  	free_netdev(vif->dev);
  
  	module_put(THIS_MODULE);
diff --combined drivers/net/xen-netback/netback.c
index 7842555,611aebe..4f81ac0
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@@ -138,36 -138,26 +138,26 @@@ static inline pending_ring_idx_t nr_pen
  		vif->pending_prod + vif->pending_cons;
  }
  
- static int max_required_rx_slots(struct xenvif *vif)
+ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
  {
- 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ 	RING_IDX prod, cons;
  
- 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- 	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
- 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
- 
- 	return max;
- }
- 
- int xenvif_rx_ring_full(struct xenvif *vif)
- {
- 	RING_IDX peek   = vif->rx_req_cons_peek;
- 	RING_IDX needed = max_required_rx_slots(vif);
+ 	do {
+ 		prod = vif->rx.sring->req_prod;
+ 		cons = vif->rx.req_cons;
  
- 	return ((vif->rx.sring->req_prod - peek) < needed) ||
- 	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
- }
+ 		if (prod - cons >= needed)
+ 			return true;
  
- int xenvif_must_stop_queue(struct xenvif *vif)
- {
- 	if (!xenvif_rx_ring_full(vif))
- 		return 0;
+ 		vif->rx.sring->req_event = prod + 1;
  
- 	vif->rx.sring->req_event = vif->rx_req_cons_peek +
- 		max_required_rx_slots(vif);
- 	mb(); /* request notification /then/ check the queue */
+ 		/* Make sure event is visible before we check prod
+ 		 * again.
+ 		 */
+ 		mb();
+ 	} while (vif->rx.sring->req_prod != prod);
  
- 	return xenvif_rx_ring_full(vif);
+ 	return false;
  }
  
  /*
@@@ -210,93 -200,6 +200,6 @@@ static bool start_new_rx_buffer(int off
  	return false;
  }
  
- struct xenvif_count_slot_state {
- 	unsigned long copy_off;
- 	bool head;
- };
- 
- unsigned int xenvif_count_frag_slots(struct xenvif *vif,
- 				     unsigned long offset, unsigned long size,
- 				     struct xenvif_count_slot_state *state)
- {
- 	unsigned count = 0;
- 
- 	offset &= ~PAGE_MASK;
- 
- 	while (size > 0) {
- 		unsigned long bytes;
- 
- 		bytes = PAGE_SIZE - offset;
- 
- 		if (bytes > size)
- 			bytes = size;
- 
- 		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
- 			count++;
- 			state->copy_off = 0;
- 		}
- 
- 		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
- 			bytes = MAX_BUFFER_OFFSET - state->copy_off;
- 
- 		state->copy_off += bytes;
- 
- 		offset += bytes;
- 		size -= bytes;
- 
- 		if (offset == PAGE_SIZE)
- 			offset = 0;
- 
- 		state->head = false;
- 	}
- 
- 	return count;
- }
- 
- /*
-  * Figure out how many ring slots we're going to need to send @skb to
-  * the guest. This function is essentially a dry run of
-  * xenvif_gop_frag_copy.
-  */
- unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
- {
- 	struct xenvif_count_slot_state state;
- 	unsigned int count;
- 	unsigned char *data;
- 	unsigned i;
- 
- 	state.head = true;
- 	state.copy_off = 0;
- 
- 	/* Slot for the first (partial) page of data. */
- 	count = 1;
- 
- 	/* Need a slot for the GSO prefix for GSO extra data? */
- 	if (skb_shinfo(skb)->gso_size)
- 		count++;
- 
- 	data = skb->data;
- 	while (data < skb_tail_pointer(skb)) {
- 		unsigned long offset = offset_in_page(data);
- 		unsigned long size = PAGE_SIZE - offset;
- 
- 		if (data + size > skb_tail_pointer(skb))
- 			size = skb_tail_pointer(skb) - data;
- 
- 		count += xenvif_count_frag_slots(vif, offset, size, &state);
- 
- 		data += size;
- 	}
- 
- 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- 		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
- 
- 		count += xenvif_count_frag_slots(vif, offset, size, &state);
- 	}
- 	return count;
- }
- 
  struct netrx_pending_operations {
  	unsigned copy_prod, copy_cons;
  	unsigned meta_prod, meta_cons;
@@@ -557,12 -460,12 +460,12 @@@ struct skb_cb_overlay 
  	int meta_slots_used;
  };
  
- static void xenvif_kick_thread(struct xenvif *vif)
+ void xenvif_kick_thread(struct xenvif *vif)
  {
  	wake_up(&vif->wq);
  }
  
- void xenvif_rx_action(struct xenvif *vif)
+ static void xenvif_rx_action(struct xenvif *vif)
  {
  	s8 status;
  	u16 flags;
@@@ -571,8 -474,6 +474,6 @@@
  	struct sk_buff *skb;
  	LIST_HEAD(notify);
  	int ret;
- 	int nr_frags;
- 	int count;
  	unsigned long offset;
  	struct skb_cb_overlay *sco;
  	int need_to_notify = 0;
@@@ -584,38 -485,51 +485,51 @@@
  
  	skb_queue_head_init(&rxq);
  
- 	count = 0;
- 
  	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- 		vif = netdev_priv(skb->dev);
- 		nr_frags = skb_shinfo(skb)->nr_frags;
+ 		int max_slots_needed;
+ 		int i;
+ 
+ 		/* We need a cheap worst-case estimate for the number of
+ 		 * slots we'll use.
+ 		 */
+ 
+ 		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+ 						skb_headlen(skb),
+ 						PAGE_SIZE);
+ 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 			unsigned int size;
+ 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ 			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ 		}
+ 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ 			max_slots_needed++;
+ 
+ 		/* If the skb may not fit then bail out now */
+ 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+ 			skb_queue_head(&vif->rx_queue, skb);
+ 			need_to_notify = 1;
+ 			break;
+ 		}
  
  		sco = (struct skb_cb_overlay *)skb->cb;
  		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
- 
- 		count += nr_frags + 1;
+ 		BUG_ON(sco->meta_slots_used > max_slots_needed);
  
  		__skb_queue_tail(&rxq, skb);
- 
- 		/* Filled the batch queue? */
- 		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- 		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
- 			break;
  	}
  
  	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
  
  	if (!npo.copy_prod)
- 		return;
+ 		goto done;
  
 -	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
 +	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
  	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
  
  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
  		sco = (struct skb_cb_overlay *)skb->cb;
  
- 		vif = netdev_priv(skb->dev);
- 
  		if ((1 << vif->meta[npo.meta_cons].gso_type) &
  		    vif->gso_prefix_mask) {
  			resp = RING_GET_RESPONSE(&vif->rx,
@@@ -681,25 -595,13 +595,13 @@@
  		if (ret)
  			need_to_notify = 1;
  
- 		xenvif_notify_tx_completion(vif);
- 
  		npo.meta_cons += sco->meta_slots_used;
  		dev_kfree_skb(skb);
  	}
  
+ done:
  	if (need_to_notify)
  		notify_remote_via_irq(vif->rx_irq);
- 
- 	/* More work to do? */
- 	if (!skb_queue_empty(&vif->rx_queue))
- 		xenvif_kick_thread(vif);
- }
- 
- void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
- {
- 	skb_queue_tail(&vif->rx_queue, skb);
- 
- 	xenvif_kick_thread(vif);
  }
  
  void xenvif_check_rx_xenvif(struct xenvif *vif)
@@@ -1141,10 -1043,7 +1043,7 @@@ static int xenvif_set_skb_gso(struct xe
  	}
  
  	skb_shinfo(skb)->gso_size = gso->u.gso.size;
- 
- 	/* Header must be checked, and gso_segs computed. */
- 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- 	skb_shinfo(skb)->gso_segs = 0;
+ 	/* gso_segs will be calculated later */
  
  	return 0;
  }
@@@ -1209,10 -1108,8 +1108,10 @@@ static int checksum_setup_ip(struct xen
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
 -					  offsetof(struct tcphdr, check)))
 +					  offsetof(struct tcphdr, check))) {
 +			err = -EPROTO;
  			goto out;
 +		}
  
  		if (recalculate_partial_csum)
  			tcp_hdr(skb)->check =
@@@ -1229,10 -1126,8 +1128,10 @@@
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
 -					  offsetof(struct udphdr, check)))
 +					  offsetof(struct udphdr, check))) {
 +			err = -EPROTO;
  			goto out;
 +		}
  
  		if (recalculate_partial_csum)
  			udp_hdr(skb)->check =
@@@ -1354,10 -1249,8 +1253,10 @@@ static int checksum_setup_ipv6(struct x
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
 -					  offsetof(struct tcphdr, check)))
 +					  offsetof(struct tcphdr, check))) {
 +			err = -EPROTO;
  			goto out;
 +		}
  
  		if (recalculate_partial_csum)
  			tcp_hdr(skb)->check =
@@@ -1374,10 -1267,8 +1273,10 @@@
  			goto out;
  
  		if (!skb_partial_csum_set(skb, off,
 -					  offsetof(struct udphdr, check)))
 +					  offsetof(struct udphdr, check))) {
 +			err = -EPROTO;
  			goto out;
 +		}
  
  		if (recalculate_partial_csum)
  			udp_hdr(skb)->check =
@@@ -1687,6 -1578,20 +1586,20 @@@ static int xenvif_tx_submit(struct xenv
  
  		skb_probe_transport_header(skb, 0);
  
+ 		/* If the packet is GSO then we will have just set up the
+ 		 * transport header offset in checksum_setup so it's now
+ 		 * straightforward to calculate gso_segs.
+ 		 */
+ 		if (skb_is_gso(skb)) {
+ 			int mss = skb_shinfo(skb)->gso_size;
+ 			int hdrlen = skb_transport_header(skb) -
+ 				skb_mac_header(skb) +
+ 				tcp_hdrlen(skb);
+ 
+ 			skb_shinfo(skb)->gso_segs =
+ 				DIV_ROUND_UP(skb->len - hdrlen, mss);
+ 		}
+ 
  		vif->dev->stats.rx_bytes += skb->len;
  		vif->dev->stats.rx_packets++;
  
@@@ -1811,7 -1716,7 +1724,7 @@@ static struct xen_netif_rx_response *ma
  
  static inline int rx_work_todo(struct xenvif *vif)
  {
- 	return !skb_queue_empty(&vif->rx_queue);
+ 	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
  }
  
  static inline int tx_work_todo(struct xenvif *vif)
@@@ -1861,8 -1766,6 +1774,6 @@@ int xenvif_map_frontend_rings(struct xe
  	rxs = (struct xen_netif_rx_sring *)addr;
  	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
  
- 	vif->rx_req_cons_peek = 0;
- 
  	return 0;
  
  err:
@@@ -1870,9 -1773,24 +1781,24 @@@
  	return err;
  }
  
+ void xenvif_stop_queue(struct xenvif *vif)
+ {
+ 	if (!vif->can_queue)
+ 		return;
+ 
+ 	netif_stop_queue(vif->dev);
+ }
+ 
+ static void xenvif_start_queue(struct xenvif *vif)
+ {
+ 	if (xenvif_schedulable(vif))
+ 		netif_wake_queue(vif->dev);
+ }
+ 
  int xenvif_kthread(void *data)
  {
  	struct xenvif *vif = data;
+ 	struct sk_buff *skb;
  
  	while (!kthread_should_stop()) {
  		wait_event_interruptible(vif->wq,
@@@ -1881,12 -1799,22 +1807,22 @@@
  		if (kthread_should_stop())
  			break;
  
- 		if (rx_work_todo(vif))
+ 		if (!skb_queue_empty(&vif->rx_queue))
  			xenvif_rx_action(vif);
  
+ 		vif->rx_event = false;
+ 
+ 		if (skb_queue_empty(&vif->rx_queue) &&
+ 		    netif_queue_stopped(vif->dev))
+ 			xenvif_start_queue(vif);
+ 
  		cond_resched();
  	}
  
+ 	/* Bin any remaining skbs */
+ 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ 		dev_kfree_skb(skb);
+ 
  	return 0;
  }
  
diff --combined include/linux/netdevice.h
index 5faaadb,51c0fe2..78d8f33
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@@ -1283,6 -1283,9 +1283,9 @@@ struct net_device 
  #if IS_ENABLED(CONFIG_NET_DSA)
  	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
  #endif
+ #if IS_ENABLED(CONFIG_TIPC)
+ 	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
+ #endif
  	void 			*atalk_ptr;	/* AppleTalk link 	*/
  	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
  	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
@@@ -1406,7 -1409,7 +1409,7 @@@
  	union {
  		void				*ml_priv;
  		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
- 		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
+ 		struct pcpu_sw_netstats __percpu	*tstats;
  		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
  		struct pcpu_vstats __percpu	*vstats; /* veth stats */
  	};
@@@ -1673,7 -1676,7 +1676,7 @@@ struct offload_callbacks 
  	int			(*gso_send_check)(struct sk_buff *skb);
  	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
  					       struct sk_buff *skb);
- 	int			(*gro_complete)(struct sk_buff *skb);
+ 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
  };
  
  struct packet_offload {
@@@ -1682,6 -1685,15 +1685,15 @@@
  	struct list_head	 list;
  };
  
+ /* often modified stats are per cpu, other are shared (netdev->stats) */
+ struct pcpu_sw_netstats {
+ 	u64     rx_packets;
+ 	u64     rx_bytes;
+ 	u64     tx_packets;
+ 	u64     tx_bytes;
+ 	struct u64_stats_sync   syncp;
+ };
+ 
  #include <linux/notifier.h>
  
  /* netdevice notifier chain. Please remember to update the rtnetlink
@@@ -1738,8 -1750,6 +1750,6 @@@ netdev_notifier_info_to_dev(const struc
  	return info->dev;
  }
  
- int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- 				  struct netdev_notifier_info *info);
  int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  
  
@@@ -1806,7 -1816,6 +1816,6 @@@ void dev_remove_pack(struct packet_typ
  void __dev_remove_pack(struct packet_type *pt);
  void dev_add_offload(struct packet_offload *po);
  void dev_remove_offload(struct packet_offload *po);
- void __dev_remove_offload(struct packet_offload *po);
  
  struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
  					unsigned short mask);
@@@ -1912,15 -1921,6 +1921,15 @@@ static inline int dev_parse_header(cons
  	return dev->header_ops->parse(skb, haddr);
  }
  
 +static inline int dev_rebuild_header(struct sk_buff *skb)
 +{
 +	const struct net_device *dev = skb->dev;
 +
 +	if (!dev->header_ops || !dev->header_ops->rebuild)
 +		return 0;
 +	return dev->header_ops->rebuild(skb);
 +}
 +
  typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
  static inline int unregister_gifconf(unsigned int family)
@@@ -2377,17 -2377,52 +2386,52 @@@ static inline int netif_copy_real_num_q
  #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
  int netif_get_num_default_rss_queues(void);
  
- /* Use this variant when it is known for sure that it
-  * is executing from hardware interrupt context or with hardware interrupts
-  * disabled.
-  */
- void dev_kfree_skb_irq(struct sk_buff *skb);
+ enum skb_free_reason {
+ 	SKB_REASON_CONSUMED,
+ 	SKB_REASON_DROPPED,
+ };
+ 
+ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
  
- /* Use this variant in places where it could be invoked
-  * from either hardware interrupt or other context, with hardware interrupts
-  * either disabled or enabled.
+ /*
+  * It is not allowed to call kfree_skb() or consume_skb() from hardware
+  * interrupt context or with hardware interrupts being disabled.
+  * (in_irq() || irqs_disabled())
+  *
+  * We provide four helpers that can be used in following contexts :
+  *
+  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+  *  replacing kfree_skb(skb)
+  *
+  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+  *  Typically used in place of consume_skb(skb) in TX completion path
+  *
+  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+  *  replacing kfree_skb(skb)
+  *
+  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+  *  and consumed a packet. Used in place of consume_skb(skb)
   */
- void dev_kfree_skb_any(struct sk_buff *skb);
+ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+ {
+ 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+ }
+ 
+ static inline void dev_consume_skb_irq(struct sk_buff *skb)
+ {
+ 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+ }
+ 
+ static inline void dev_kfree_skb_any(struct sk_buff *skb)
+ {
+ 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+ }
+ 
+ static inline void dev_consume_skb_any(struct sk_buff *skb)
+ {
+ 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+ }
  
  int netif_rx(struct sk_buff *skb);
  int netif_rx_ni(struct sk_buff *skb);
@@@ -2781,17 -2816,10 +2825,10 @@@ int register_netdev(struct net_device *
  void unregister_netdev(struct net_device *dev);
  
  /* General hardware address lists handling functions */
- int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- 			   struct netdev_hw_addr_list *from_list,
- 			   int addr_len, unsigned char addr_type);
- void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- 			    struct netdev_hw_addr_list *from_list,
- 			    int addr_len, unsigned char addr_type);
  int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
  		   struct netdev_hw_addr_list *from_list, int addr_len);
  void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
  		      struct netdev_hw_addr_list *from_list, int addr_len);
- void __hw_addr_flush(struct netdev_hw_addr_list *list);
  void __hw_addr_init(struct netdev_hw_addr_list *list);
  
  /* Functions used for device addresses handling */
@@@ -2799,10 -2827,6 +2836,6 @@@ int dev_addr_add(struct net_device *dev
  		 unsigned char addr_type);
  int dev_addr_del(struct net_device *dev, const unsigned char *addr,
  		 unsigned char addr_type);
- int dev_addr_add_multiple(struct net_device *to_dev,
- 			  struct net_device *from_dev, unsigned char addr_type);
- int dev_addr_del_multiple(struct net_device *to_dev,
- 			  struct net_device *from_dev, unsigned char addr_type);
  void dev_addr_flush(struct net_device *dev);
  int dev_addr_init(struct net_device *dev);
  
@@@ -2849,7 -2873,6 +2882,6 @@@ extern int		weight_p
  extern int		bpf_jit_enable;
  
  bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
- bool netdev_has_any_upper_dev(struct net_device *dev);
  struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
  						     struct list_head **iter);
  
@@@ -2878,6 -2901,7 +2910,7 @@@ void *netdev_lower_get_next_private_rcu
  	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
  
  void *netdev_adjacent_get_private(struct list_head *adj_list);
+ void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
  int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@@ -2888,8 -2912,6 +2921,6 @@@ int netdev_master_upper_dev_link_privat
  					 void *private);
  void netdev_upper_dev_unlink(struct net_device *dev,
  			     struct net_device *upper_dev);
- void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- 				       struct net_device *lower_dev);
  void *netdev_lower_dev_get_private(struct net_device *dev,
  				   struct net_device *lower_dev);
  int skb_checksum_help(struct sk_buff *skb);
@@@ -3017,19 -3039,6 +3048,19 @@@ static inline void netif_set_gso_max_si
  	dev->gso_max_size = size;
  }
  
 +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 +					int pulled_hlen, u16 mac_offset,
 +					int mac_len)
 +{
 +	skb->protocol = protocol;
 +	skb->encapsulation = 1;
 +	skb_push(skb, pulled_hlen);
 +	skb_reset_transport_header(skb);
 +	skb->mac_header = mac_offset;
 +	skb->network_header = skb->mac_header + mac_len;
 +	skb->mac_len = mac_len;
 +}
 +
  static inline bool netif_is_macvlan(struct net_device *dev)
  {
  	return dev->priv_flags & IFF_MACVLAN;
diff --combined include/linux/skbuff.h
index 6f69b3f,c5cd016..88d4f2e
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@@ -34,11 -34,82 +34,82 @@@
  #include <linux/netdev_features.h>
  #include <net/flow_keys.h>
  
+ /* A. Checksumming of received packets by device.
+  *
+  * CHECKSUM_NONE:
+  *
+  *   Device failed to checksum this packet e.g. due to lack of capabilities.
+  *   The packet contains full (though not verified) checksum in packet but
+  *   not in skb->csum. Thus, skb->csum is undefined in this case.
+  *
+  * CHECKSUM_UNNECESSARY:
+  *
+  *   The hardware you're dealing with doesn't calculate the full checksum
+  *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+  *   for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
+  *   set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
+  *   undefined in this case though. It is a bad option, but, unfortunately,
+  *   nowadays most vendors do this. Apparently with the secret goal to sell
+  *   you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+  *
+  * CHECKSUM_COMPLETE:
+  *
+  *   This is the most generic way. The device supplied checksum of the _whole_
+  *   packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+  *   hardware doesn't need to parse L3/L4 headers to implement this.
+  *
+  *   Note: Even if device supports only some protocols, but is able to produce
+  *   skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+  *
+  * CHECKSUM_PARTIAL:
+  *
+  *   This is identical to the case for output below. This may occur on a packet
+  *   received directly from another Linux OS, e.g., a virtualized Linux kernel
+  *   on the same host. The packet can be treated in the same way as
+  *   CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
+  *   checksum must be filled in by the OS or the hardware.
+  *
+  * B. Checksumming on output.
+  *
+  * CHECKSUM_NONE:
+  *
+  *   The skb was already checksummed by the protocol, or a checksum is not
+  *   required.
+  *
+  * CHECKSUM_PARTIAL:
+  *
+  *   The device is required to checksum the packet as seen by hard_start_xmit()
+  *   from skb->csum_start up to the end, and to record/write the checksum at
+  *   offset skb->csum_start + skb->csum_offset.
+  *
+  *   The device must show its capabilities in dev->features, set up at device
+  *   setup time, e.g. netdev_features.h:
+  *
+  *	NETIF_F_HW_CSUM	- It's a clever device, it's able to checksum everything.
+  *	NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
+  *			  IPv4. Sigh. Vendors like this way for an unknown reason.
+  *			  Though, see comment above about CHECKSUM_UNNECESSARY. 8)
+  *	NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
+  *	NETIF_F_...     - Well, you get the picture.
+  *
+  * CHECKSUM_UNNECESSARY:
+  *
+  *   Normally, the device will do per protocol specific checksumming. Protocol
+  *   implementations that do not want the NIC to perform the checksum
+  *   calculation should use this flag in their outgoing skbs.
+  *
+  *	NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
+  *			   offload. Correspondingly, the FCoE protocol driver
+  *			   stack should use CHECKSUM_UNNECESSARY.
+  *
+  * Any questions? No questions, good.		--ANK
+  */
+ 
  /* Don't change this without changing skb_csum_unnecessary! */
- #define CHECKSUM_NONE 0
- #define CHECKSUM_UNNECESSARY 1
- #define CHECKSUM_COMPLETE 2
- #define CHECKSUM_PARTIAL 3
+ #define CHECKSUM_NONE		0
+ #define CHECKSUM_UNNECESSARY	1
+ #define CHECKSUM_COMPLETE	2
+ #define CHECKSUM_PARTIAL	3
  
  #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
  				 ~(SMP_CACHE_BYTES - 1))
@@@ -54,58 -125,6 +125,6 @@@
  			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
  			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  
- /* A. Checksumming of received packets by device.
-  *
-  *	NONE: device failed to checksum this packet.
-  *		skb->csum is undefined.
-  *
-  *	UNNECESSARY: device parsed packet and wouldbe verified checksum.
-  *		skb->csum is undefined.
-  *	      It is bad option, but, unfortunately, many of vendors do this.
-  *	      Apparently with secret goal to sell you new device, when you
-  *	      will add new protocol to your host. F.e. IPv6. 8)
-  *
-  *	COMPLETE: the most generic way. Device supplied checksum of _all_
-  *	    the packet as seen by netif_rx in skb->csum.
-  *	    NOTE: Even if device supports only some protocols, but
-  *	    is able to produce some skb->csum, it MUST use COMPLETE,
-  *	    not UNNECESSARY.
-  *
-  *	PARTIAL: identical to the case for output below.  This may occur
-  *	    on a packet received directly from another Linux OS, e.g.,
-  *	    a virtualised Linux kernel on the same host.  The packet can
-  *	    be treated in the same way as UNNECESSARY except that on
-  *	    output (i.e., forwarding) the checksum must be filled in
-  *	    by the OS or the hardware.
-  *
-  * B. Checksumming on output.
-  *
-  *	NONE: skb is checksummed by protocol or csum is not required.
-  *
-  *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
-  *	from skb->csum_start to the end and to record the checksum
-  *	at skb->csum_start + skb->csum_offset.
-  *
-  *	Device must show its capabilities in dev->features, set
-  *	at device setup time.
-  *	NETIF_F_HW_CSUM	- it is clever device, it is able to checksum
-  *			  everything.
-  *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
-  *			  TCP/UDP over IPv4. Sigh. Vendors like this
-  *			  way by an unknown reason. Though, see comment above
-  *			  about CHECKSUM_UNNECESSARY. 8)
-  *	NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
-  *
-  *	UNNECESSARY: device will do per protocol specific csum. Protocol drivers
-  *	that do not want net to perform the checksum calculation should use
-  *	this flag in their outgoing skbs.
-  *	NETIF_F_FCOE_CRC  this indicates the device can do FCoE FC CRC
-  *			  offload. Correspondingly, the FCoE protocol driver
-  *			  stack should use CHECKSUM_UNNECESSARY.
-  *
-  *	Any questions? No questions, good. 		--ANK
-  */
- 
  struct net_device;
  struct scatterlist;
  struct pipe_inode_info;
@@@ -703,15 -722,73 +722,73 @@@ unsigned int skb_find_text(struct sk_bu
  			   unsigned int to, struct ts_config *config,
  			   struct ts_state *state);
  
- void __skb_get_rxhash(struct sk_buff *skb);
- static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+ /*
+  * Packet hash types specify the type of hash in skb_set_hash.
+  *
+  * Hash types refer to the protocol layer addresses which are used to
+  * construct a packet's hash. The hashes are used to differentiate or identify
+  * flows of the protocol layer for the hash type. Hash types are either
+  * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
+  *
+  * Properties of hashes:
+  *
+  * 1) Two packets in different flows have different hash values
+  * 2) Two packets in the same flow should have the same hash value
+  *
+  * A hash at a higher layer is considered to be more specific. A driver should
+  * set the most specific hash possible.
+  *
+  * A driver cannot indicate a more specific hash than the layer at which a hash
+  * was computed. For instance an L3 hash cannot be set as an L4 hash.
+  *
+  * A driver may indicate a hash level which is less specific than the
+  * actual layer the hash was computed on. For instance, a hash computed
+  * at L4 may be considered an L3 hash. This should only be done if the
+  * driver can't unambiguously determine that the HW computed the hash at
+  * the higher layer. Note that the "should" in the second property above
+  * permits this.
+  */
+ enum pkt_hash_types {
+ 	PKT_HASH_TYPE_NONE,	/* Undefined type */
+ 	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
+ 	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
+ 	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
+ };
+ 
+ static inline void
+ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+ {
+ 	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+ 	skb->rxhash = hash;
+ }
+ 
+ void __skb_get_hash(struct sk_buff *skb);
+ static inline __u32 skb_get_hash(struct sk_buff *skb)
  {
  	if (!skb->l4_rxhash)
- 		__skb_get_rxhash(skb);
+ 		__skb_get_hash(skb);
  
  	return skb->rxhash;
  }
  
+ static inline void skb_clear_hash(struct sk_buff *skb)
+ {
+ 	skb->rxhash = 0;
+ 	skb->l4_rxhash = 0;
+ }
+ 
+ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+ {
+ 	if (!skb->l4_rxhash)
+ 		skb_clear_hash(skb);
+ }
+ 
+ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+ {
+ 	to->rxhash = from->rxhash;
+ 	to->l4_rxhash = from->l4_rxhash;
+ };
+ 
  #ifdef NET_SKBUFF_DATA_USES_OFFSET
  static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
  {
@@@ -1638,11 -1715,6 +1715,11 @@@ static inline void skb_set_mac_header(s
  	skb->mac_header += offset;
  }
  
 +static inline void skb_pop_mac_header(struct sk_buff *skb)
 +{
 +	skb->mac_header = skb->network_header;
 +}
 +
  static inline void skb_probe_transport_header(struct sk_buff *skb,
  					      const int offset_hint)
  {
@@@ -2397,6 -2469,24 +2474,24 @@@ static inline void *skb_header_pointer(
  	return buffer;
  }
  
+ /**
+  *	skb_needs_linearize - check if we need to linearize a given skb
+  *			      depending on the given device features.
+  *	@skb: socket buffer to check
+  *	@features: net device features
+  *
+  *	Returns true if either:
+  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+  *	2. skb is fragmented and the device does not support SG.
+  */
+ static inline bool skb_needs_linearize(struct sk_buff *skb,
+ 				       netdev_features_t features)
+ {
+ 	return skb_is_nonlinear(skb) &&
+ 	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
+ 		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
+ }
+ 
  static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
  					     void *to,
  					     const unsigned int len)
@@@ -2531,10 -2621,6 +2626,10 @@@ static inline void sw_tx_timestamp(stru
   * Ethernet MAC Drivers should call this function in their hard_xmit()
   * function immediately before giving the sk_buff to the MAC hardware.
   *
 + * Specifically, one should make absolutely sure that this function is
 + * called before TX completion of this packet can trigger.  Otherwise
 + * the packet could potentially already be freed.
 + *
   * @skb: A socket buffer.
   */
  static inline void skb_tx_timestamp(struct sk_buff *skb)
diff --combined include/net/sctp/structs.h
index 0a248b3,41c7013..e9f732f
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@@ -19,9 -19,8 +19,8 @@@
   * See the GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with GNU CC; see the file COPYING.  If not, write to
-  * the Free Software Foundation, 59 Temple Place - Suite 330,
-  * Boston, MA 02111-1307, USA.
+  * along with GNU CC; see the file COPYING.  If not, see
+  * <http://www.gnu.org/licenses/>.
   *
   * Please send any bug reports or fixes you make to the
   * email addresses:
@@@ -1046,6 -1045,9 +1045,6 @@@ struct sctp_outq 
  
  	/* Corked? */
  	char cork;
 -
 -	/* Is this structure empty?  */
 -	char empty;
  };
  
  void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@@ -1358,12 -1360,6 +1357,6 @@@ struct sctp_association 
  
  	/* This is all information about our peer.  */
  	struct {
- 		/* rwnd
- 		 *
- 		 * Peer Rwnd   : Current calculated value of the peer's rwnd.
- 		 */
- 		__u32 rwnd;
- 
  		/* transport_addr_list
  		 *
  		 * Peer	       : A list of SCTP transport addresses that the
@@@ -1381,6 -1377,12 +1374,12 @@@
  		 */
  		struct list_head transport_addr_list;
  
+ 		/* rwnd
+ 		 *
+ 		 * Peer Rwnd   : Current calculated value of the peer's rwnd.
+ 		 */
+ 		__u32 rwnd;
+ 
  		/* transport_count
  		 *
  		 * Peer        : A count of the number of peer addresses
@@@ -1463,6 -1465,20 +1462,20 @@@
  		 */
  		struct sctp_tsnmap tsn_map;
  
+ 		/* This mask is used to disable sending the ASCONF chunk
+ 		 * with specified parameter to peer.
+ 		 */
+ 		__be16 addip_disabled_mask;
+ 
+ 		/* These are capabilities which our peer advertised.  */
+ 		__u8	ecn_capable:1,      /* Can peer do ECN? */
+ 			ipv4_address:1,     /* Peer understands IPv4 addresses? */
+ 			ipv6_address:1,     /* Peer understands IPv6 addresses? */
+ 			hostname_address:1, /* Peer understands DNS addresses? */
+ 			asconf_capable:1,   /* Does peer support ADDIP? */
+ 			prsctp_capable:1,   /* Can peer do PR-SCTP? */
+ 			auth_capable:1;     /* Is peer doing SCTP-AUTH? */
+ 
  		/* Ack State   : This flag indicates if the next received
  		 *             : packet is to be responded to with a
  		 *             : SACK. This is initializedto 0.  When a packet
@@@ -1477,25 -1493,11 +1490,11 @@@
  		__u32	sack_cnt;
  		__u32	sack_generation;
  
- 		/* These are capabilities which our peer advertised.  */
- 		__u8	ecn_capable:1,	    /* Can peer do ECN? */
- 			ipv4_address:1,	    /* Peer understands IPv4 addresses? */
- 			ipv6_address:1,	    /* Peer understands IPv6 addresses? */
- 			hostname_address:1, /* Peer understands DNS addresses? */
- 			asconf_capable:1,   /* Does peer support ADDIP? */
- 			prsctp_capable:1,   /* Can peer do PR-SCTP? */
- 			auth_capable:1;	    /* Is peer doing SCTP-AUTH? */
- 
  		__u32   adaptation_ind;	 /* Adaptation Code point. */
  
- 		/* This mask is used to disable sending the ASCONF chunk
- 		 * with specified parameter to peer.
- 		 */
- 		__be16 addip_disabled_mask;
- 
  		struct sctp_inithdr_host i;
- 		int cookie_len;
  		void *cookie;
+ 		int cookie_len;
  
  		/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
  		 * C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
@@@ -1527,14 -1529,14 +1526,14 @@@
  	 */
  	sctp_state_t state;
  
- 	/* The cookie life I award for any cookie.  */
- 	ktime_t cookie_life;
- 
  	/* Overall     : The overall association error count.
  	 * Error Count : [Clear this any time I get something.]
  	 */
  	int overall_error_count;
  
+ 	/* The cookie life I award for any cookie.  */
+ 	ktime_t cookie_life;
+ 
  	/* These are the association's initial, max, and min RTO values.
  	 * These values will be initialized by system defaults, but can
  	 * be modified via the SCTP_RTOINFO socket option.
@@@ -1589,10 -1591,9 +1588,9 @@@
  	/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
  	__u32 param_flags;
  
+ 	__u32 sackfreq;
  	/* SACK delay timeout */
  	unsigned long sackdelay;
- 	__u32 sackfreq;
- 
  
  	unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
  	struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
@@@ -1600,12 -1601,12 +1598,12 @@@
  	/* Transport to which SHUTDOWN chunk was last sent.  */
  	struct sctp_transport *shutdown_last_sent_to;
  
  	/* Transport to which INIT chunk was last sent.  */
  	struct sctp_transport *init_last_sent_to;
  
+ 	/* How many times have we resent a SHUTDOWN */
+ 	int shutdown_retries;
+ 
  	/* Next TSN    : The next TSN number to be assigned to a new
  	 *	       : DATA chunk.  This is sent in the INIT or INIT
  	 *	       : ACK chunk to the peer and incremented each
@@@ -1810,8 -1811,8 +1808,8 @@@
  	 * after reaching 4294967295.
  	 */
  	__u32 addip_serial;
- 	union sctp_addr *asconf_addr_del_pending;
  	int src_out_of_asoc_ok;
+ 	union sctp_addr *asconf_addr_del_pending;
  	struct sctp_transport *new_transport;
  
  	/* SCTP AUTH: list of the endpoint shared keys.  These
diff --combined include/uapi/linux/pci_regs.h
index ab6b4e7,c870c2a..30db069
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@@ -489,7 -489,12 +489,12 @@@
  #define  PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed */
  #define  PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
  #define  PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
+ #define  PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
  #define  PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */
+ #define  PCI_EXP_LNKSTA_NLW_X1	0x0010	/* Current Link Width x1 */
+ #define  PCI_EXP_LNKSTA_NLW_X2	0x0020	/* Current Link Width x2 */
+ #define  PCI_EXP_LNKSTA_NLW_X4	0x0040	/* Current Link Width x4 */
+ #define  PCI_EXP_LNKSTA_NLW_X8	0x0080	/* Current Link Width x8 */
  #define  PCI_EXP_LNKSTA_NLW_SHIFT 4	/* start of NLW mask in link status */
  #define  PCI_EXP_LNKSTA_LT	0x0800	/* Link Training */
  #define  PCI_EXP_LNKSTA_SLC	0x1000	/* Slot Clock Configuration */
@@@ -518,16 -523,8 +523,16 @@@
  #define  PCI_EXP_SLTCTL_CCIE	0x0010	/* Command Completed Interrupt Enable */
  #define  PCI_EXP_SLTCTL_HPIE	0x0020	/* Hot-Plug Interrupt Enable */
  #define  PCI_EXP_SLTCTL_AIC	0x00c0	/* Attention Indicator Control */
 +#define  PCI_EXP_SLTCTL_ATTN_IND_ON    0x0040 /* Attention Indicator on */
 +#define  PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
 +#define  PCI_EXP_SLTCTL_ATTN_IND_OFF   0x00c0 /* Attention Indicator off */
  #define  PCI_EXP_SLTCTL_PIC	0x0300	/* Power Indicator Control */
 +#define  PCI_EXP_SLTCTL_PWR_IND_ON     0x0100 /* Power Indicator on */
 +#define  PCI_EXP_SLTCTL_PWR_IND_BLINK  0x0200 /* Power Indicator blinking */
 +#define  PCI_EXP_SLTCTL_PWR_IND_OFF    0x0300 /* Power Indicator off */
  #define  PCI_EXP_SLTCTL_PCC	0x0400	/* Power Controller Control */
 +#define  PCI_EXP_SLTCTL_PWR_ON         0x0000 /* Power On */
 +#define  PCI_EXP_SLTCTL_PWR_OFF        0x0400 /* Power Off */
  #define  PCI_EXP_SLTCTL_EIC	0x0800	/* Electromechanical Interlock Control */
  #define  PCI_EXP_SLTCTL_DLLSCE	0x1000	/* Data Link Layer State Changed Enable */
  #define PCI_EXP_SLTSTA		26	/* Slot Status */
@@@ -685,34 -682,17 +690,34 @@@
  #define PCI_ERR_ROOT_ERR_SRC	52	/* Error Source Identification */
  
  /* Virtual Channel */
 -#define PCI_VC_PORT_REG1	4
 -#define  PCI_VC_REG1_EVCC	0x7	/* extended VC count */
 -#define PCI_VC_PORT_REG2	8
 -#define  PCI_VC_REG2_32_PHASE	0x2
 -#define  PCI_VC_REG2_64_PHASE	0x4
 -#define  PCI_VC_REG2_128_PHASE	0x8
 +#define PCI_VC_PORT_CAP1	4
 +#define  PCI_VC_CAP1_EVCC	0x00000007	/* extended VC count */
 +#define  PCI_VC_CAP1_LPEVCC	0x00000070	/* low prio extended VC count */
 +#define  PCI_VC_CAP1_ARB_SIZE	0x00000c00
 +#define PCI_VC_PORT_CAP2	8
 +#define  PCI_VC_CAP2_32_PHASE		0x00000002
 +#define  PCI_VC_CAP2_64_PHASE		0x00000004
 +#define  PCI_VC_CAP2_128_PHASE		0x00000008
 +#define  PCI_VC_CAP2_ARB_OFF		0xff000000
  #define PCI_VC_PORT_CTRL	12
 +#define  PCI_VC_PORT_CTRL_LOAD_TABLE	0x00000001
  #define PCI_VC_PORT_STATUS	14
 +#define  PCI_VC_PORT_STATUS_TABLE	0x00000001
  #define PCI_VC_RES_CAP		16
 +#define  PCI_VC_RES_CAP_32_PHASE	0x00000002
 +#define  PCI_VC_RES_CAP_64_PHASE	0x00000004
 +#define  PCI_VC_RES_CAP_128_PHASE	0x00000008
 +#define  PCI_VC_RES_CAP_128_PHASE_TB	0x00000010
 +#define  PCI_VC_RES_CAP_256_PHASE	0x00000020
 +#define  PCI_VC_RES_CAP_ARB_OFF		0xff000000
  #define PCI_VC_RES_CTRL		20
 +#define  PCI_VC_RES_CTRL_LOAD_TABLE	0x00010000
 +#define  PCI_VC_RES_CTRL_ARB_SELECT	0x000e0000
 +#define  PCI_VC_RES_CTRL_ID		0x07000000
 +#define  PCI_VC_RES_CTRL_ENABLE		0x80000000
  #define PCI_VC_RES_STATUS	26
 +#define  PCI_VC_RES_STATUS_TABLE	0x00000001
 +#define  PCI_VC_RES_STATUS_NEGO		0x00000002
  #define PCI_CAP_VC_BASE_SIZEOF		0x10
  #define PCI_CAP_VC_PER_VC_SIZEOF	0x0C
  
diff --combined net/batman-adv/translation-table.c
index ff625fe,06506e6..19bc42f
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@@ -51,7 -51,7 +51,7 @@@ static int batadv_compare_tt(const stru
  	const void *data1 = container_of(node, struct batadv_tt_common_entry,
  					 hash_entry);
  
- 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+ 	return batadv_compare_eth(data1, data2);
  }
  
  /**
@@@ -333,8 -333,7 +333,8 @@@ static void batadv_tt_local_event(struc
  		return;
  
  	tt_change_node->change.flags = flags;
 -	tt_change_node->change.reserved = 0;
 +	memset(tt_change_node->change.reserved, 0,
 +	       sizeof(tt_change_node->change.reserved));
  	memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
  	tt_change_node->change.vid = htons(common->vid);
  
@@@ -2222,8 -2221,7 +2222,8 @@@ static void batadv_tt_tvlv_generate(str
  			       ETH_ALEN);
  			tt_change->flags = tt_common_entry->flags;
  			tt_change->vid = htons(tt_common_entry->vid);
 -			tt_change->reserved = 0;
 +			memset(tt_change->reserved, 0,
 +			       sizeof(tt_change->reserved));
  
  			tt_num_entries++;
  			tt_change++;
diff --combined net/core/dev.c
index 4fc1722,77f43aa..8897abc
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -480,7 -480,7 +480,7 @@@ EXPORT_SYMBOL(dev_add_offload)
   *	and must not be freed until after all the CPU's have gone
   *	through a quiescent state.
   */
- void __dev_remove_offload(struct packet_offload *po)
+ static void __dev_remove_offload(struct packet_offload *po)
  {
  	struct list_head *head = &offload_base;
  	struct packet_offload *po1;
@@@ -498,7 -498,6 +498,6 @@@
  out:
  	spin_unlock(&offload_lock);
  }
- EXPORT_SYMBOL(__dev_remove_offload);
  
  /**
   *	dev_remove_offload	 - remove packet offload handler
@@@ -1566,14 -1565,14 +1565,14 @@@ EXPORT_SYMBOL(unregister_netdevice_noti
   *	are as for raw_notifier_call_chain().
   */
  
- int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- 				  struct netdev_notifier_info *info)
+ static int call_netdevice_notifiers_info(unsigned long val,
+ 					 struct net_device *dev,
+ 					 struct netdev_notifier_info *info)
  {
  	ASSERT_RTNL();
  	netdev_notifier_info_init(info, dev);
  	return raw_notifier_call_chain(&netdev_chain, val, info);
  }
- EXPORT_SYMBOL(call_netdevice_notifiers_info);
  
  /**
   *	call_netdevice_notifiers - call all network notifier blocks
@@@ -2145,30 -2144,42 +2144,42 @@@ void __netif_schedule(struct Qdisc *q
  }
  EXPORT_SYMBOL(__netif_schedule);
  
- void dev_kfree_skb_irq(struct sk_buff *skb)
+ struct dev_kfree_skb_cb {
+ 	enum skb_free_reason reason;
+ };
+ 
+ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  {
- 	if (atomic_dec_and_test(&skb->users)) {
- 		struct softnet_data *sd;
- 		unsigned long flags;
+ 	return (struct dev_kfree_skb_cb *)skb->cb;
+ }
  
- 		local_irq_save(flags);
- 		sd = &__get_cpu_var(softnet_data);
- 		skb->next = sd->completion_queue;
- 		sd->completion_queue = skb;
- 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
- 		local_irq_restore(flags);
+ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ {
+ 	unsigned long flags;
+ 
+ 	if (likely(atomic_read(&skb->users) == 1)) {
+ 		smp_rmb();
+ 		atomic_set(&skb->users, 0);
+ 	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+ 		return;
  	}
+ 	get_kfree_skb_cb(skb)->reason = reason;
+ 	local_irq_save(flags);
+ 	skb->next = __this_cpu_read(softnet_data.completion_queue);
+ 	__this_cpu_write(softnet_data.completion_queue, skb);
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_restore(flags);
  }
- EXPORT_SYMBOL(dev_kfree_skb_irq);
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
- void dev_kfree_skb_any(struct sk_buff *skb)
+ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  {
  	if (in_irq() || irqs_disabled())
- 		dev_kfree_skb_irq(skb);
+ 		__dev_kfree_skb_irq(skb, reason);
  	else
  		dev_kfree_skb(skb);
  }
- EXPORT_SYMBOL(dev_kfree_skb_any);
+ EXPORT_SYMBOL(__dev_kfree_skb_any);
  
  
  /**
@@@ -2442,13 -2453,8 +2453,8 @@@ static void dev_gso_skb_destructor(stru
  {
  	struct dev_gso_cb *cb;
  
- 	do {
- 		struct sk_buff *nskb = skb->next;
- 
- 		skb->next = nskb->next;
- 		nskb->next = NULL;
- 		kfree_skb(nskb);
- 	} while (skb->next);
+ 	kfree_skb_list(skb->next);
+ 	skb->next = NULL;
  
  	cb = DEV_GSO_CB(skb);
  	if (cb->destructor)
@@@ -2523,21 -2529,6 +2529,6 @@@ netdev_features_t netif_skb_features(st
  }
  EXPORT_SYMBOL(netif_skb_features);
  
- /*
-  * Returns true if either:
-  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
-  *	2. skb is fragmented and the device does not support SG.
-  */
- static inline int skb_needs_linearize(struct sk_buff *skb,
- 				      netdev_features_t features)
- {
- 	return skb_is_nonlinear(skb) &&
- 			((skb_has_frag_list(skb) &&
- 				!(features & NETIF_F_FRAGLIST)) ||
- 			(skb_shinfo(skb)->nr_frags &&
- 				!(features & NETIF_F_SG)));
- }
- 
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
  			struct netdev_queue *txq, void *accel_priv)
  {
@@@ -3009,7 -3000,7 +3000,7 @@@ static int get_rps_cpu(struct net_devic
  	}
  
  	skb_reset_network_header(skb);
- 	if (!skb_get_rxhash(skb))
+ 	if (!skb_get_hash(skb))
  		goto done;
  
  	flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@@ -3154,7 -3145,7 +3145,7 @@@ static bool skb_flow_limit(struct sk_bu
  	rcu_read_lock();
  	fl = rcu_dereference(sd->flow_limit);
  	if (fl) {
- 		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+ 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
  		old_flow = fl->history[fl->history_head];
  		fl->history[fl->history_head] = new_flow;
  
@@@ -3306,7 -3297,10 +3297,10 @@@ static void net_tx_action(struct softir
  			clist = clist->next;
  
  			WARN_ON(atomic_read(&skb->users));
- 			trace_kfree_skb(skb, net_tx_action);
+ 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+ 				trace_consume_skb(skb);
+ 			else
+ 				trace_kfree_skb(skb, net_tx_action);
  			__kfree_skb(skb);
  		}
  	}
@@@ -3752,7 -3746,7 +3746,7 @@@ static int napi_gro_complete(struct sk_
  		if (ptype->type != type || !ptype->callbacks.gro_complete)
  			continue;
  
- 		err = ptype->callbacks.gro_complete(skb);
+ 		err = ptype->callbacks.gro_complete(skb, 0);
  		break;
  	}
  	rcu_read_unlock();
@@@ -3818,6 -3812,23 +3812,23 @@@ static void gro_list_prepare(struct nap
  	}
  }
  
+ static void skb_gro_reset_offset(struct sk_buff *skb)
+ {
+ 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ 	const skb_frag_t *frag0 = &pinfo->frags[0];
+ 
+ 	NAPI_GRO_CB(skb)->data_offset = 0;
+ 	NAPI_GRO_CB(skb)->frag0 = NULL;
+ 	NAPI_GRO_CB(skb)->frag0_len = 0;
+ 
+ 	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+ 	    pinfo->nr_frags &&
+ 	    !PageHighMem(skb_frag_page(frag0))) {
+ 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+ 	}
+ }
+ 
  static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
  	struct sk_buff **pp = NULL;
@@@ -3833,6 -3844,7 +3844,7 @@@
  	if (skb_is_gso(skb) || skb_has_frag_list(skb))
  		goto normal;
  
+ 	skb_gro_reset_offset(skb);
  	gro_list_prepare(napi, skb);
  
  	rcu_read_lock();
@@@ -3938,27 -3950,8 +3950,8 @@@ static gro_result_t napi_skb_finish(gro
  	return ret;
  }
  
- static void skb_gro_reset_offset(struct sk_buff *skb)
- {
- 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
- 	const skb_frag_t *frag0 = &pinfo->frags[0];
- 
- 	NAPI_GRO_CB(skb)->data_offset = 0;
- 	NAPI_GRO_CB(skb)->frag0 = NULL;
- 	NAPI_GRO_CB(skb)->frag0_len = 0;
- 
- 	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
- 	    pinfo->nr_frags &&
- 	    !PageHighMem(skb_frag_page(frag0))) {
- 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
- 	}
- }
- 
  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
- 	skb_gro_reset_offset(skb);
- 
  	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  }
  EXPORT_SYMBOL(napi_gro_receive);
@@@ -3981,8 -3974,7 +3974,7 @@@ struct sk_buff *napi_get_frags(struct n
  
  	if (!skb) {
  		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
- 		if (skb)
- 			napi->skb = skb;
+ 		napi->skb = skb;
  	}
  	return skb;
  }
@@@ -3993,12 -3985,7 +3985,7 @@@ static gro_result_t napi_frags_finish(s
  {
  	switch (ret) {
  	case GRO_NORMAL:
- 	case GRO_HELD:
- 		skb->protocol = eth_type_trans(skb, skb->dev);
- 
- 		if (ret == GRO_HELD)
- 			skb_gro_pull(skb, -ETH_HLEN);
- 		else if (netif_receive_skb(skb))
+ 		if (netif_receive_skb(skb))
  			ret = GRO_DROP;
  		break;
  
@@@ -4007,6 -3994,7 +3994,7 @@@
  		napi_reuse_skb(napi, skb);
  		break;
  
+ 	case GRO_HELD:
  	case GRO_MERGED:
  		break;
  	}
@@@ -4017,36 -4005,15 +4005,15 @@@
  static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  {
  	struct sk_buff *skb = napi->skb;
  
  	napi->skb = NULL;
  
- 	skb_reset_mac_header(skb);
- 	skb_gro_reset_offset(skb);
- 
- 	off = skb_gro_offset(skb);
- 	hlen = off + sizeof(*eth);
- 	eth = skb_gro_header_fast(skb, off);
- 	if (skb_gro_header_hard(skb, hlen)) {
- 		eth = skb_gro_header_slow(skb, hlen, off);
- 		if (unlikely(!eth)) {
- 			napi_reuse_skb(napi, skb);
- 			skb = NULL;
- 			goto out;
- 		}
+ 	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+ 		napi_reuse_skb(napi, skb);
+ 		return NULL;
  	}
+ 	skb->protocol = eth_type_trans(skb, skb->dev);
  
- 	skb_gro_pull(skb, sizeof(*eth));
- 
- 	/*
- 	 * This works because the only protocols we care about don't require
- 	 * special handling.  We'll fix it up properly at the end.
- 	 */
- 	skb->protocol = eth->h_proto;
- 
- out:
  	return skb;
  }
  
@@@ -4062,7 -4029,7 +4029,7 @@@ gro_result_t napi_gro_frags(struct napi
  EXPORT_SYMBOL(napi_gro_frags);
  
  /*
-  * net_rps_action sends any pending IPI's for rps.
+  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
   * Note: called with local irq disabled, but exits with local irq enabled.
   */
  static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@@ -4267,17 -4234,10 +4234,10 @@@ EXPORT_SYMBOL(netif_napi_add)
  
  void netif_napi_del(struct napi_struct *napi)
  {
  	list_del_init(&napi->dev_list);
  	napi_free_frags(napi);
  
- 	for (skb = napi->gro_list; skb; skb = next) {
- 		next = skb->next;
- 		skb->next = NULL;
- 		kfree_skb(skb);
- 	}
- 
+ 	kfree_skb_list(napi->gro_list);
  	napi->gro_list = NULL;
  	napi->gro_count = 0;
  }
@@@ -4394,19 -4354,6 +4354,6 @@@ struct netdev_adjacent 
  	struct rcu_head rcu;
  };
  
- static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
- 						     struct net_device *adj_dev,
- 						     struct list_head *adj_list)
- {
- 	struct netdev_adjacent *adj;
- 
- 	list_for_each_entry_rcu(adj, adj_list, list) {
- 		if (adj->dev == adj_dev)
- 			return adj;
- 	}
- 	return NULL;
- }
- 
  static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
  						 struct net_device *adj_dev,
  						 struct list_head *adj_list)
@@@ -4445,13 -4392,12 +4392,12 @@@ EXPORT_SYMBOL(netdev_has_upper_dev)
   * Find out if a device is linked to an upper device and return true in case
   * it is. The caller must hold the RTNL lock.
   */
- bool netdev_has_any_upper_dev(struct net_device *dev)
+ static bool netdev_has_any_upper_dev(struct net_device *dev)
  {
  	ASSERT_RTNL();
  
  	return !list_empty(&dev->all_adj_list.upper);
  }
- EXPORT_SYMBOL(netdev_has_any_upper_dev);
  
  /**
   * netdev_master_upper_dev_get - Get master upper device
@@@ -4500,7 -4446,7 +4446,7 @@@ struct net_device *netdev_all_upper_get
  {
  	struct netdev_adjacent *upper;
  
 -	WARN_ON_ONCE(!rcu_read_lock_held());
 +	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  
  	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  
@@@ -4571,6 -4517,27 +4517,27 @@@ void *netdev_lower_get_next_private_rcu
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
  /**
+  * netdev_lower_get_first_private_rcu - Get the first ->private from the
+  *				       lower neighbour list, RCU
+  *				       variant
+  * @dev: device
+  *
+  * Gets the first netdev_adjacent->private from the dev's lower neighbour
+  * list. The caller must hold RCU read lock.
+  */
+ void *netdev_lower_get_first_private_rcu(struct net_device *dev)
+ {
+ 	struct netdev_adjacent *lower;
+ 
+ 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
+ 			struct netdev_adjacent, list);
+ 	if (lower)
+ 		return lower->private;
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
+ 
+ /**
   * netdev_master_upper_dev_get_rcu - Get master upper device
   * @dev: device
   *
@@@ -4662,9 -4629,9 +4629,9 @@@ free_adj
  	return ret;
  }
  
- void __netdev_adjacent_dev_remove(struct net_device *dev,
- 				  struct net_device *adj_dev,
- 				  struct list_head *dev_list)
+ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ 					 struct net_device *adj_dev,
+ 					 struct list_head *dev_list)
  {
  	struct netdev_adjacent *adj;
  	char linkname[IFNAMSIZ+7];
@@@ -4702,11 -4669,11 +4669,11 @@@
  	kfree_rcu(adj, rcu);
  }
  
- int __netdev_adjacent_dev_link_lists(struct net_device *dev,
- 				     struct net_device *upper_dev,
- 				     struct list_head *up_list,
- 				     struct list_head *down_list,
- 				     void *private, bool master)
+ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ 					    struct net_device *upper_dev,
+ 					    struct list_head *up_list,
+ 					    struct list_head *down_list,
+ 					    void *private, bool master)
  {
  	int ret;
  
@@@ -4725,8 -4692,8 +4692,8 @@@
  	return 0;
  }
  
- int __netdev_adjacent_dev_link(struct net_device *dev,
- 			       struct net_device *upper_dev)
+ static int __netdev_adjacent_dev_link(struct net_device *dev,
+ 				      struct net_device *upper_dev)
  {
  	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
  						&dev->all_adj_list.upper,
@@@ -4734,26 -4701,26 +4701,26 @@@
  						NULL, false);
  }
  
- void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
- 					struct net_device *upper_dev,
- 					struct list_head *up_list,
- 					struct list_head *down_list)
+ static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ 					       struct net_device *upper_dev,
+ 					       struct list_head *up_list,
+ 					       struct list_head *down_list)
  {
  	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
  	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
  }
  
- void __netdev_adjacent_dev_unlink(struct net_device *dev,
- 				  struct net_device *upper_dev)
+ static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+ 					 struct net_device *upper_dev)
  {
  	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
  					   &dev->all_adj_list.upper,
  					   &upper_dev->all_adj_list.lower);
  }
  
- int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- 					 struct net_device *upper_dev,
- 					 void *private, bool master)
+ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ 						struct net_device *upper_dev,
+ 						void *private, bool master)
  {
  	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
  
@@@ -4772,8 -4739,8 +4739,8 @@@
  	return 0;
  }
  
- void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
- 					    struct net_device *upper_dev)
+ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ 						   struct net_device *upper_dev)
  {
  	__netdev_adjacent_dev_unlink(dev, upper_dev);
  	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@@ -4962,21 -4929,6 +4929,6 @@@ void netdev_upper_dev_unlink(struct net
  }
  EXPORT_SYMBOL(netdev_upper_dev_unlink);
  
- void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- 				       struct net_device *lower_dev)
- {
- 	struct netdev_adjacent *lower;
- 
- 	if (!lower_dev)
- 		return NULL;
- 	lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
- 	if (!lower)
- 		return NULL;
- 
- 	return lower->private;
- }
- EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
- 
  void *netdev_lower_dev_get_private(struct net_device *dev,
  				   struct net_device *lower_dev)
  {
diff --combined net/core/neighbour.c
index 932c6d7,a666740..ea97361
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@@ -38,6 -38,8 +38,8 @@@
  #include <linux/random.h>
  #include <linux/string.h>
  #include <linux/log2.h>
+ #include <linux/inetdevice.h>
+ #include <net/addrconf.h>
  
  #define DEBUG
  #define NEIGH_DEBUG 1
@@@ -497,7 -499,7 +499,7 @@@ struct neighbour *__neigh_create(struc
  		goto out_neigh_release;
  	}
  
- 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
+ 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
  
  	write_lock_bh(&tbl->lock);
  	nht = rcu_dereference_protected(tbl->nht,
@@@ -776,7 -778,7 +778,7 @@@ static void neigh_periodic_work(struct 
  		tbl->last_rand = jiffies;
  		for (p = &tbl->parms; p; p = p->next)
  			p->reachable_time =
- 				neigh_rand_reach_time(p->base_reachable_time);
+ 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
  	}
  
  	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@@ -799,7 -801,7 +801,7 @@@
  
  			if (atomic_read(&n->refcnt) == 1 &&
  			    (state == NUD_FAILED ||
- 			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
+ 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
  				*np = n->next;
  				n->dead = 1;
  				write_unlock(&n->lock);
@@@ -822,12 -824,12 +824,12 @@@ next_elt
  						lockdep_is_held(&tbl->lock));
  	}
  out:
- 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
- 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
- 	 * base_reachable_time.
+ 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
+ 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
+ 	 * BASE_REACHABLE_TIME.
  	 */
  	schedule_delayed_work(&tbl->gc_work,
- 			      tbl->parms.base_reachable_time >> 1);
+ 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
  	write_unlock_bh(&tbl->lock);
  }
  
@@@ -835,8 -837,9 +837,9 @@@ static __inline__ int neigh_max_probes(
  {
  	struct neigh_parms *p = n->parms;
  	return (n->nud_state & NUD_PROBE) ?
- 		p->ucast_probes :
- 		p->ucast_probes + p->app_probes + p->mcast_probes;
+ 		NEIGH_VAR(p, UCAST_PROBES) :
+ 		NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
+ 		NEIGH_VAR(p, MCAST_PROBES);
  }
  
  static void neigh_invalidate(struct neighbour *neigh)
@@@ -901,12 -904,13 +904,13 @@@ static void neigh_timer_handler(unsigne
  			neigh_dbg(2, "neigh %p is still alive\n", neigh);
  			next = neigh->confirmed + neigh->parms->reachable_time;
  		} else if (time_before_eq(now,
- 					  neigh->used + neigh->parms->delay_probe_time)) {
+ 					  neigh->used +
+ 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
  			neigh_dbg(2, "neigh %p is delayed\n", neigh);
  			neigh->nud_state = NUD_DELAY;
  			neigh->updated = jiffies;
  			neigh_suspect(neigh);
- 			next = now + neigh->parms->delay_probe_time;
+ 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
  		} else {
  			neigh_dbg(2, "neigh %p is suspected\n", neigh);
  			neigh->nud_state = NUD_STALE;
@@@ -916,7 -920,8 +920,8 @@@
  		}
  	} else if (state & NUD_DELAY) {
  		if (time_before_eq(now,
- 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
+ 				   neigh->confirmed +
+ 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
  			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
  			neigh->nud_state = NUD_REACHABLE;
  			neigh->updated = jiffies;
@@@ -928,11 -933,11 +933,11 @@@
  			neigh->nud_state = NUD_PROBE;
  			neigh->updated = jiffies;
  			atomic_set(&neigh->probes, 0);
- 			next = now + neigh->parms->retrans_time;
+ 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
  		}
  	} else {
  		/* NUD_PROBE|NUD_INCOMPLETE */
- 		next = now + neigh->parms->retrans_time;
+ 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
  	}
  
  	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@@ -973,13 -978,16 +978,16 @@@ int __neigh_event_send(struct neighbou
  		goto out_unlock_bh;
  
  	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
- 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+ 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
  			unsigned long next, now = jiffies;
  
- 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
+ 			atomic_set(&neigh->probes,
+ 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
  			neigh->nud_state     = NUD_INCOMPLETE;
  			neigh->updated = now;
- 			next = now + max(neigh->parms->retrans_time, HZ/2);
+ 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ 					 HZ/2);
  			neigh_add_timer(neigh, next);
  			immediate_probe = true;
  		} else {
@@@ -994,14 -1002,14 +1002,14 @@@
  		neigh_dbg(2, "neigh %p is delayed\n", neigh);
  		neigh->nud_state = NUD_DELAY;
  		neigh->updated = jiffies;
- 		neigh_add_timer(neigh,
- 				jiffies + neigh->parms->delay_probe_time);
+ 		neigh_add_timer(neigh, jiffies +
+ 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
  	}
  
  	if (neigh->nud_state == NUD_INCOMPLETE) {
  		if (skb) {
  			while (neigh->arp_queue_len_bytes + skb->truesize >
- 			       neigh->parms->queue_len_bytes) {
+ 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
  				struct sk_buff *buff;
  
  				buff = __skb_dequeue(&neigh->arp_queue);
@@@ -1171,7 -1179,7 +1179,7 @@@ int neigh_update(struct neighbour *neig
  		neigh_update_hhs(neigh);
  		if (!(new & NUD_CONNECTED))
  			neigh->confirmed = jiffies -
- 				      (neigh->parms->base_reachable_time << 1);
+ 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
  		notify = 1;
  	}
  	if (new == old)
@@@ -1231,6 -1239,21 +1239,21 @@@ out
  }
  EXPORT_SYMBOL(neigh_update);
  
+ /* Update the neigh to listen temporarily for probe responses, even if it is
+  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
+  */
+ void __neigh_set_probe_once(struct neighbour *neigh)
+ {
+ 	neigh->updated = jiffies;
+ 	if (!(neigh->nud_state & NUD_FAILED))
+ 		return;
+ 	neigh->nud_state = NUD_PROBE;
+ 	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ 	neigh_add_timer(neigh,
+ 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+ }
+ EXPORT_SYMBOL(__neigh_set_probe_once);
+ 
  struct neighbour *neigh_event_ns(struct neigh_table *tbl,
  				 u8 *lladdr, void *saddr,
  				 struct net_device *dev)
@@@ -1275,7 -1298,7 +1298,7 @@@ int neigh_compat_output(struct neighbou
  
  	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
  			    skb->len) < 0 &&
 -	    dev->header_ops->rebuild(skb))
 +	    dev_rebuild_header(skb))
  		return 0;
  
  	return dev_queue_xmit(skb);
@@@ -1392,9 -1415,10 +1415,10 @@@ void pneigh_enqueue(struct neigh_table 
  		    struct sk_buff *skb)
  {
  	unsigned long now = jiffies;
- 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
+ 	unsigned long sched_next = now + (net_random() %
+ 					  NEIGH_VAR(p, PROXY_DELAY));
  
- 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
+ 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
  		kfree_skb(skb);
  		return;
  	}
@@@ -1441,7 -1465,7 +1465,7 @@@ struct neigh_parms *neigh_parms_alloc(s
  		p->tbl		  = tbl;
  		atomic_set(&p->refcnt, 1);
  		p->reachable_time =
- 				neigh_rand_reach_time(p->base_reachable_time);
+ 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
  		dev_hold(dev);
  		p->dev = dev;
  		write_pnet(&p->net, hold_net(net));
@@@ -1458,6 -1482,8 +1482,8 @@@
  		p->next		= tbl->parms.next;
  		tbl->parms.next = p;
  		write_unlock_bh(&tbl->lock);
+ 
+ 		neigh_parms_data_state_cleanall(p);
  	}
  	return p;
  }
@@@ -1510,7 -1536,7 +1536,7 @@@ static void neigh_table_init_no_netlink
  	write_pnet(&tbl->parms.net, &init_net);
  	atomic_set(&tbl->parms.refcnt, 1);
  	tbl->parms.reachable_time =
- 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
+ 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
  
  	tbl->stats = alloc_percpu(struct neigh_statistics);
  	if (!tbl->stats)
@@@ -1778,24 -1804,32 +1804,32 @@@ static int neightbl_fill_parms(struct s
  	if ((parms->dev &&
  	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
  	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
- 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+ 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
+ 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
  	    /* approximative value for deprecated QUEUE_LEN (in packets) */
  	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
- 			parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
- 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
- 	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
- 	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
- 	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+ 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
+ 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
+ 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
+ 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
+ 			NEIGH_VAR(parms, UCAST_PROBES)) ||
+ 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
+ 			NEIGH_VAR(parms, MCAST_PROBES)) ||
  	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
  	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
- 			  parms->base_reachable_time) ||
- 	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+ 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
+ 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
+ 			  NEIGH_VAR(parms, GC_STALETIME)) ||
  	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
- 			  parms->delay_probe_time) ||
- 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
- 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
- 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
- 	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+ 			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
+ 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
+ 			  NEIGH_VAR(parms, RETRANS_TIME)) ||
+ 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
+ 			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
+ 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
+ 			  NEIGH_VAR(parms, PROXY_DELAY)) ||
+ 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
+ 			  NEIGH_VAR(parms, LOCKTIME)))
  		goto nla_put_failure;
  	return nla_nest_end(skb, nest);
  
@@@ -2011,44 -2045,54 +2045,54 @@@ static int neightbl_set(struct sk_buff 
  
  			switch (i) {
  			case NDTPA_QUEUE_LEN:
- 				p->queue_len_bytes = nla_get_u32(tbp[i]) *
- 						     SKB_TRUESIZE(ETH_FRAME_LEN);
+ 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ 					      nla_get_u32(tbp[i]) *
+ 					      SKB_TRUESIZE(ETH_FRAME_LEN));
  				break;
  			case NDTPA_QUEUE_LENBYTES:
- 				p->queue_len_bytes = nla_get_u32(tbp[i]);
+ 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ 					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_PROXY_QLEN:
- 				p->proxy_qlen = nla_get_u32(tbp[i]);
+ 				NEIGH_VAR_SET(p, PROXY_QLEN,
+ 					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_APP_PROBES:
- 				p->app_probes = nla_get_u32(tbp[i]);
+ 				NEIGH_VAR_SET(p, APP_PROBES,
+ 					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_UCAST_PROBES:
- 				p->ucast_probes = nla_get_u32(tbp[i]);
+ 				NEIGH_VAR_SET(p, UCAST_PROBES,
+ 					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_MCAST_PROBES:
- 				p->mcast_probes = nla_get_u32(tbp[i]);
+ 				NEIGH_VAR_SET(p, MCAST_PROBES,
+ 					      nla_get_u32(tbp[i]));
  				break;
  			case NDTPA_BASE_REACHABLE_TIME:
- 				p->base_reachable_time = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
+ 					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_GC_STALETIME:
- 				p->gc_staletime = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, GC_STALETIME,
+ 					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_DELAY_PROBE_TIME:
- 				p->delay_probe_time = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
+ 					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_RETRANS_TIME:
- 				p->retrans_time = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, RETRANS_TIME,
+ 					      nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_ANYCAST_DELAY:
- 				p->anycast_delay = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, ANYCAST_DELAY, nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_PROXY_DELAY:
- 				p->proxy_delay = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, PROXY_DELAY, nla_get_msecs(tbp[i]));
  				break;
  			case NDTPA_LOCKTIME:
- 				p->locktime = nla_get_msecs(tbp[i]);
+ 				NEIGH_VAR_SET(p, LOCKTIME, nla_get_msecs(tbp[i]));
  				break;
  			}
  		}
@@@ -2789,133 -2833,167 +2833,167 @@@ static int proc_unres_qlen(struct ctl_t
  	return ret;
  }
  
- enum {
- 	NEIGH_VAR_MCAST_PROBE,
- 	NEIGH_VAR_UCAST_PROBE,
- 	NEIGH_VAR_APP_PROBE,
- 	NEIGH_VAR_RETRANS_TIME,
- 	NEIGH_VAR_BASE_REACHABLE_TIME,
- 	NEIGH_VAR_DELAY_PROBE_TIME,
- 	NEIGH_VAR_GC_STALETIME,
- 	NEIGH_VAR_QUEUE_LEN,
- 	NEIGH_VAR_QUEUE_LEN_BYTES,
- 	NEIGH_VAR_PROXY_QLEN,
- 	NEIGH_VAR_ANYCAST_DELAY,
- 	NEIGH_VAR_PROXY_DELAY,
- 	NEIGH_VAR_LOCKTIME,
- 	NEIGH_VAR_RETRANS_TIME_MS,
- 	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
- 	NEIGH_VAR_GC_INTERVAL,
- 	NEIGH_VAR_GC_THRESH1,
- 	NEIGH_VAR_GC_THRESH2,
- 	NEIGH_VAR_GC_THRESH3,
- 	NEIGH_VAR_MAX
- };
+ static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+ 						   int family)
+ {
+ 	switch (family) {
+ 	case AF_INET:
+ 		return __in_dev_arp_parms_get_rcu(dev);
+ 	case AF_INET6:
+ 		return __in6_dev_nd_parms_get_rcu(dev);
+ 	}
+ 	return NULL;
+ }
+ 
+ static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
+ 				  int index)
+ {
+ 	struct net_device *dev;
+ 	int family = neigh_parms_family(p);
+ 
+ 	rcu_read_lock();
+ 	for_each_netdev_rcu(net, dev) {
+ 		struct neigh_parms *dst_p =
+ 				neigh_get_dev_parms_rcu(dev, family);
+ 
+ 		if (dst_p && !test_bit(index, dst_p->data_state))
+ 			dst_p->data[index] = p->data[index];
+ 	}
+ 	rcu_read_unlock();
+ }
+ 
+ static void neigh_proc_update(struct ctl_table *ctl, int write)
+ {
+ 	struct net_device *dev = ctl->extra1;
+ 	struct neigh_parms *p = ctl->extra2;
+ 	struct net *net = neigh_parms_net(p);
+ 	int index = (int *) ctl->data - p->data;
+ 
+ 	if (!write)
+ 		return;
+ 
+ 	set_bit(index, p->data_state);
+ 	if (!dev) /* NULL dev means this is default value */
+ 		neigh_copy_dflt_parms(net, p, index);
+ }
+ 
+ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+ 					   void __user *buffer,
+ 					   size_t *lenp, loff_t *ppos)
+ {
+ 	struct ctl_table tmp = *ctl;
+ 	int ret;
+ 
+ 	tmp.extra1 = &zero;
+ 	tmp.extra2 = &int_max;
+ 
+ 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ 
+ int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ 			void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ 
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(neigh_proc_dointvec);
+ 
+ int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ 				void __user *buffer,
+ 				size_t *lenp, loff_t *ppos)
+ {
+ 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+ 
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
+ 
+ static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
+ 					      void __user *buffer,
+ 					      size_t *lenp, loff_t *ppos)
+ {
+ 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
+ 
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ 
+ int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ 				   void __user *buffer,
+ 				   size_t *lenp, loff_t *ppos)
+ {
+ 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+ 
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
+ 
+ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
+ 					  void __user *buffer,
+ 					  size_t *lenp, loff_t *ppos)
+ {
+ 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
+ 
+ 	neigh_proc_update(ctl, write);
+ 	return ret;
+ }
+ 
+ #define NEIGH_PARMS_DATA_OFFSET(index)	\
+ 	(&((struct neigh_parms *) 0)->data[index])
+ 
+ #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
+ 	[NEIGH_VAR_ ## attr] = { \
+ 		.procname	= name, \
+ 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
+ 		.maxlen		= sizeof(int), \
+ 		.mode		= mval, \
+ 		.proc_handler	= proc, \
+ 	}
+ 
+ #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
+ 
+ #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
+ 
+ #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
+ 
+ #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+ 
+ #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+ 
+ #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
+ 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
  
  static struct neigh_sysctl_table {
  	struct ctl_table_header *sysctl_header;
  	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
  } neigh_sysctl_template __read_mostly = {
  	.neigh_vars = {
- 		[NEIGH_VAR_MCAST_PROBE] = {
- 			.procname	= "mcast_solicit",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.extra1 	= &zero,
- 			.extra2		= &int_max,
- 			.proc_handler	= proc_dointvec_minmax,
- 		},
- 		[NEIGH_VAR_UCAST_PROBE] = {
- 			.procname	= "ucast_solicit",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.extra1 	= &zero,
- 			.extra2		= &int_max,
- 			.proc_handler	= proc_dointvec_minmax,
- 		},
- 		[NEIGH_VAR_APP_PROBE] = {
- 			.procname	= "app_solicit",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.extra1 	= &zero,
- 			.extra2		= &int_max,
- 			.proc_handler	= proc_dointvec_minmax,
- 		},
- 		[NEIGH_VAR_RETRANS_TIME] = {
- 			.procname	= "retrans_time",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_userhz_jiffies,
- 		},
- 		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
- 			.procname	= "base_reachable_time",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_jiffies,
- 		},
- 		[NEIGH_VAR_DELAY_PROBE_TIME] = {
- 			.procname	= "delay_first_probe_time",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_jiffies,
- 		},
- 		[NEIGH_VAR_GC_STALETIME] = {
- 			.procname	= "gc_stale_time",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_jiffies,
- 		},
- 		[NEIGH_VAR_QUEUE_LEN] = {
- 			.procname	= "unres_qlen",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_unres_qlen,
- 		},
- 		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
- 			.procname	= "unres_qlen_bytes",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.extra1		= &zero,
- 			.proc_handler   = proc_dointvec_minmax,
- 		},
- 		[NEIGH_VAR_PROXY_QLEN] = {
- 			.procname	= "proxy_qlen",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.extra1 	= &zero,
- 			.extra2		= &int_max,
- 			.proc_handler	= proc_dointvec_minmax,
- 		},
- 		[NEIGH_VAR_ANYCAST_DELAY] = {
- 			.procname	= "anycast_delay",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_userhz_jiffies,
- 		},
- 		[NEIGH_VAR_PROXY_DELAY] = {
- 			.procname	= "proxy_delay",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_userhz_jiffies,
- 		},
- 		[NEIGH_VAR_LOCKTIME] = {
- 			.procname	= "locktime",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_userhz_jiffies,
- 		},
- 		[NEIGH_VAR_RETRANS_TIME_MS] = {
- 			.procname	= "retrans_time_ms",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_ms_jiffies,
- 		},
- 		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
- 			.procname	= "base_reachable_time_ms",
- 			.maxlen		= sizeof(int),
- 			.mode		= 0644,
- 			.proc_handler	= proc_dointvec_ms_jiffies,
- 		},
+ 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
+ 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
+ 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
+ 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
+ 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
+ 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
+ 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
+ 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
+ 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
+ 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
+ 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
+ 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
+ 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
+ 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
+ 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
  		[NEIGH_VAR_GC_INTERVAL] = {
  			.procname	= "gc_interval",
  			.maxlen		= sizeof(int),
@@@ -2951,31 -3029,23 +3029,23 @@@
  };
  
  int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- 			  char *p_name, proc_handler *handler)
+ 			  proc_handler *handler)
  {
+ 	int i;
  	struct neigh_sysctl_table *t;
- 	const char *dev_name_source = NULL;
+ 	const char *dev_name_source;
  	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
+ 	char *p_name;
  
  	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
  	if (!t)
  		goto err;
  
- 	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
- 	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
- 	t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
- 	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
- 	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
- 	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
- 	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
- 	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
- 	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
- 	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
- 	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
- 	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
- 	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
- 	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
- 	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
+ 	for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+ 		t->neigh_vars[i].data += (long) p;
+ 		t->neigh_vars[i].extra1 = dev;
+ 		t->neigh_vars[i].extra2 = p;
+ 	}
  
  	if (dev) {
  		dev_name_source = dev->name;
@@@ -2990,26 -3060,32 +3060,32 @@@
  		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
  	}
  
  	if (handler) {
  		/* RetransTime */
  		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
- 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
  		/* ReachableTime */
  		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
- 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
  		/* RetransTime (in milliseconds)*/
  		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
- 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
  		/* ReachableTime (in milliseconds) */
  		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
- 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
  	}
  
  	/* Don't export sysctls to unprivileged users */
  	if (neigh_parms_net(p)->user_ns != &init_user_ns)
  		t->neigh_vars[0].procname = NULL;
  
+ 	switch (neigh_parms_family(p)) {
+ 	case AF_INET:
+ 	      p_name = "ipv4";
+ 	      break;
+ 	case AF_INET6:
+ 	      p_name = "ipv6";
+ 	      break;
+ 	default:
+ 	      BUG();
+ 	}
+ 
  	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
  		p_name, dev_name_source);
  	t->sysctl_header =
diff --combined net/ipv4/udp.c
index a7e4729,d5d24ec..80f649f
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -986,7 -986,7 +986,7 @@@ int udp_sendmsg(struct kiocb *iocb, str
  		fl4 = &fl4_stack;
  		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
  				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
- 				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
+ 				   inet_sk_flowi_flags(sk),
  				   faddr, saddr, dport, inet->inet_sport);
  
  		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
@@@ -2478,7 -2478,6 +2478,7 @@@ struct sk_buff *skb_udp_tunnel_segment(
  				       netdev_features_t features)
  {
  	struct sk_buff *segs = ERR_PTR(-EINVAL);
 +	u16 mac_offset = skb->mac_header;
  	int mac_len = skb->mac_len;
  	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
  	__be16 protocol = skb->protocol;
@@@ -2498,11 -2497,8 +2498,11 @@@
  	/* segment inner packet. */
  	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
  	segs = skb_mac_gso_segment(skb, enc_features);
 -	if (!segs || IS_ERR(segs))
 +	if (!segs || IS_ERR(segs)) {
 +		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
 +				     mac_len);
  		goto out;
 +	}
  
  	outer_hlen = skb_tnl_header_len(skb);
  	skb = segs;
diff --combined net/ipv6/addrconf.c
index 1a341f7,6c16345..392c529
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@@ -442,6 -442,8 +442,8 @@@ static int inet6_netconf_msgsize_devcon
  	if (type == -1 || type == NETCONFA_MC_FORWARDING)
  		size += nla_total_size(4);
  #endif
+ 	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
+ 		size += nla_total_size(4);
  
  	return size;
  }
@@@ -475,6 -477,10 +477,10 @@@ static int inet6_netconf_fill_devconf(s
  			devconf->mc_forwarding) < 0)
  		goto nla_put_failure;
  #endif
+ 	if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
+ 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ 		goto nla_put_failure;
+ 
  	return nlmsg_end(skb, nlh);
  
  nla_put_failure:
@@@ -509,6 -515,7 +515,7 @@@ errout
  static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
  	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
  	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
+ 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
  };
  
  static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@@ -834,6 -841,8 +841,8 @@@ ipv6_add_addr(struct inet6_dev *idev, c
  		goto out;
  	}
  
+ 	neigh_parms_data_state_setall(idev->nd_parms);
+ 
  	ifa->addr = *addr;
  	if (peer_addr)
  		ifa->peer_addr = *peer_addr;
@@@ -986,12 -995,9 +995,9 @@@ static void ipv6_del_addr(struct inet6_
  	 * --yoshfuji
  	 */
  	if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
- 		struct in6_addr prefix;
  		struct rt6_info *rt;
  
- 		ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
- 
- 		rt = addrconf_get_prefix_route(&prefix,
+ 		rt = addrconf_get_prefix_route(&ifp->addr,
  					       ifp->prefix_len,
  					       ifp->idev->dev,
  					       0, RTF_GATEWAY | RTF_DEFAULT);
@@@ -1024,7 -1030,7 +1030,7 @@@ static int ipv6_create_tempaddr(struct 
  	u32 addr_flags;
  	unsigned long now = jiffies;
  
- 	write_lock(&idev->lock);
+ 	write_lock_bh(&idev->lock);
  	if (ift) {
  		spin_lock_bh(&ift->lock);
  		memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
@@@ -1036,7 -1042,7 +1042,7 @@@
  retry:
  	in6_dev_hold(idev);
  	if (idev->cnf.use_tempaddr <= 0) {
- 		write_unlock(&idev->lock);
+ 		write_unlock_bh(&idev->lock);
  		pr_info("%s: use_tempaddr is disabled\n", __func__);
  		in6_dev_put(idev);
  		ret = -1;
@@@ -1046,7 -1052,7 +1052,7 @@@
  	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
  		idev->cnf.use_tempaddr = -1;	/*XXX*/
  		spin_unlock_bh(&ifp->lock);
- 		write_unlock(&idev->lock);
+ 		write_unlock_bh(&idev->lock);
  		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
  			__func__);
  		in6_dev_put(idev);
@@@ -1071,8 -1077,8 +1077,8 @@@
  
  	regen_advance = idev->cnf.regen_max_retry *
  	                idev->cnf.dad_transmits *
- 	                idev->nd_parms->retrans_time / HZ;
- 	write_unlock(&idev->lock);
+ 	                NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+ 	write_unlock_bh(&idev->lock);
  
  	/* A temporary address is created only if this calculated Preferred
  	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
@@@ -1099,7 -1105,7 +1105,7 @@@
  		in6_dev_put(idev);
  		pr_info("%s: retry temporary address regeneration\n", __func__);
  		tmpaddr = &addr;
- 		write_lock(&idev->lock);
+ 		write_lock_bh(&idev->lock);
  		goto retry;
  	}
  
@@@ -1407,7 -1413,7 +1413,7 @@@ try_nextdev
  EXPORT_SYMBOL(ipv6_dev_get_saddr);
  
  int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- 		      unsigned char banned_flags)
+ 		      u32 banned_flags)
  {
  	struct inet6_ifaddr *ifp;
  	int err = -EADDRNOTAVAIL;
@@@ -1424,7 -1430,7 +1430,7 @@@
  }
  
  int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
- 		    unsigned char banned_flags)
+ 		    u32 banned_flags)
  {
  	struct inet6_dev *idev;
  	int err = -EADDRNOTAVAIL;
@@@ -1888,7 -1894,8 +1894,8 @@@ static void ipv6_regen_rndid(unsigned l
  
  	expires = jiffies +
  		idev->cnf.temp_prefered_lft * HZ -
- 		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
+ 		idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
+ 		NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
  		idev->cnf.max_desync_factor * HZ;
  	if (time_before(expires, jiffies)) {
  		pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
@@@ -2016,6 -2023,73 +2023,73 @@@ static struct inet6_dev *addrconf_add_d
  	return idev;
  }
  
+ static void manage_tempaddrs(struct inet6_dev *idev,
+ 			     struct inet6_ifaddr *ifp,
+ 			     __u32 valid_lft, __u32 prefered_lft,
+ 			     bool create, unsigned long now)
+ {
+ 	u32 flags;
+ 	struct inet6_ifaddr *ift;
+ 
+ 	read_lock_bh(&idev->lock);
+ 	/* update all temporary addresses in the list */
+ 	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
+ 		int age, max_valid, max_prefered;
+ 
+ 		if (ifp != ift->ifpub)
+ 			continue;
+ 
+ 		/* RFC 4941 section 3.3:
+ 		 * If a received option will extend the lifetime of a public
+ 		 * address, the lifetimes of temporary addresses should
+ 		 * be extended, subject to the overall constraint that no
+ 		 * temporary addresses should ever remain "valid" or "preferred"
+ 		 * for a time longer than (TEMP_VALID_LIFETIME) or
+ 		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
+ 		 */
+ 		age = (now - ift->cstamp) / HZ;
+ 		max_valid = idev->cnf.temp_valid_lft - age;
+ 		if (max_valid < 0)
+ 			max_valid = 0;
+ 
+ 		max_prefered = idev->cnf.temp_prefered_lft -
+ 			       idev->cnf.max_desync_factor - age;
+ 		if (max_prefered < 0)
+ 			max_prefered = 0;
+ 
+ 		if (valid_lft > max_valid)
+ 			valid_lft = max_valid;
+ 
+ 		if (prefered_lft > max_prefered)
+ 			prefered_lft = max_prefered;
+ 
+ 		spin_lock(&ift->lock);
+ 		flags = ift->flags;
+ 		ift->valid_lft = valid_lft;
+ 		ift->prefered_lft = prefered_lft;
+ 		ift->tstamp = now;
+ 		if (prefered_lft > 0)
+ 			ift->flags &= ~IFA_F_DEPRECATED;
+ 
+ 		spin_unlock(&ift->lock);
+ 		if (!(flags&IFA_F_TENTATIVE))
+ 			ipv6_ifa_notify(0, ift);
+ 	}
+ 
+ 	if ((create || list_empty(&idev->tempaddr_list)) &&
+ 	    idev->cnf.use_tempaddr > 0) {
+ 		/* When a new public address is created as described
+ 		 * in [ADDRCONF], also create a new temporary address.
+ 		 * Also create a temporary address if it's enabled but
+ 		 * no temporary address currently exists.
+ 		 */
+ 		read_unlock_bh(&idev->lock);
+ 		ipv6_create_tempaddr(ifp, NULL);
+ 	} else {
+ 		read_unlock_bh(&idev->lock);
+ 	}
+ }
+ 
  void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
  {
  	struct prefix_info *pinfo;
@@@ -2170,6 -2244,7 +2244,7 @@@ ok
  				return;
  			}
  
+ 			ifp->flags |= IFA_F_MANAGETEMPADDR;
  			update_lft = 0;
  			create = 1;
  			ifp->cstamp = jiffies;
@@@ -2178,9 -2253,8 +2253,8 @@@
  		}
  
  		if (ifp) {
- 			int flags;
+ 			u32 flags;
  			unsigned long now;
- 			struct inet6_ifaddr *ift;
  			u32 stored_lft;
  
  			/* update lifetime (RFC2462 5.5.3 e) */
@@@ -2221,70 -2295,8 +2295,8 @@@
  			} else
  				spin_unlock(&ifp->lock);
  
- 			read_lock_bh(&in6_dev->lock);
- 			/* update all temporary addresses in the list */
- 			list_for_each_entry(ift, &in6_dev->tempaddr_list,
- 					    tmp_list) {
- 				int age, max_valid, max_prefered;
- 
- 				if (ifp != ift->ifpub)
- 					continue;
- 
- 				/*
- 				 * RFC 4941 section 3.3:
- 				 * If a received option will extend the lifetime
- 				 * of a public address, the lifetimes of
- 				 * temporary addresses should be extended,
- 				 * subject to the overall constraint that no
- 				 * temporary addresses should ever remain
- 				 * "valid" or "preferred" for a time longer than
- 				 * (TEMP_VALID_LIFETIME) or
- 				 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
- 				 * respectively.
- 				 */
- 				age = (now - ift->cstamp) / HZ;
- 				max_valid = in6_dev->cnf.temp_valid_lft - age;
- 				if (max_valid < 0)
- 					max_valid = 0;
- 
- 				max_prefered = in6_dev->cnf.temp_prefered_lft -
- 					       in6_dev->cnf.max_desync_factor -
- 					       age;
- 				if (max_prefered < 0)
- 					max_prefered = 0;
- 
- 				if (valid_lft > max_valid)
- 					valid_lft = max_valid;
- 
- 				if (prefered_lft > max_prefered)
- 					prefered_lft = max_prefered;
- 
- 				spin_lock(&ift->lock);
- 				flags = ift->flags;
- 				ift->valid_lft = valid_lft;
- 				ift->prefered_lft = prefered_lft;
- 				ift->tstamp = now;
- 				if (prefered_lft > 0)
- 					ift->flags &= ~IFA_F_DEPRECATED;
- 
- 				spin_unlock(&ift->lock);
- 				if (!(flags&IFA_F_TENTATIVE))
- 					ipv6_ifa_notify(0, ift);
- 			}
- 
- 			if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
- 				/*
- 				 * When a new public address is created as
- 				 * described in [ADDRCONF], also create a new
- 				 * temporary address. Also create a temporary
- 				 * address if it's enabled but no temporary
- 				 * address currently exists.
- 				 */
- 				read_unlock_bh(&in6_dev->lock);
- 				ipv6_create_tempaddr(ifp, NULL);
- 			} else {
- 				read_unlock_bh(&in6_dev->lock);
- 			}
+ 			manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
+ 					 create, now);
  
  			in6_ifa_put(ifp);
  			addrconf_verify(0);
@@@ -2363,10 -2375,11 +2375,11 @@@ err_exit
  /*
   *	Manual configuration of address on an interface
   */
- static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
+ static int inet6_addr_add(struct net *net, int ifindex,
+ 			  const struct in6_addr *pfx,
  			  const struct in6_addr *peer_pfx,
- 			  unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
- 			  __u32 valid_lft)
+ 			  unsigned int plen, __u32 ifa_flags,
+ 			  __u32 prefered_lft, __u32 valid_lft)
  {
  	struct inet6_ifaddr *ifp;
  	struct inet6_dev *idev;
@@@ -2385,6 -2398,9 +2398,9 @@@
  	if (!valid_lft || prefered_lft > valid_lft)
  		return -EINVAL;
  
+ 	if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
+ 		return -EINVAL;
+ 
  	dev = __dev_get_by_index(net, ifindex);
  	if (!dev)
  		return -ENODEV;
@@@ -2425,6 -2441,9 +2441,9 @@@
  		 * manually configured addresses
  		 */
  		addrconf_dad_start(ifp);
+ 		if (ifa_flags & IFA_F_MANAGETEMPADDR)
+ 			manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
+ 					 true, jiffies);
  		in6_ifa_put(ifp);
  		addrconf_verify(0);
  		return 0;
@@@ -3176,7 -3195,8 +3195,8 @@@ static void addrconf_dad_timer(unsigne
  	}
  
  	ifp->dad_probes--;
- 	addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time);
+ 	addrconf_mod_dad_timer(ifp,
+ 			       NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
  	spin_unlock(&ifp->lock);
  	write_unlock(&idev->lock);
  
@@@ -3356,7 -3376,7 +3376,7 @@@ static int if6_seq_show(struct seq_fil
  		   ifp->idev->dev->ifindex,
  		   ifp->prefix_len,
  		   ifp->scope,
- 		   ifp->flags,
+ 		   (u8) ifp->flags,
  		   ifp->idev->dev->name);
  	return 0;
  }
@@@ -3456,12 -3476,7 +3476,12 @@@ restart
  					 &inet6_addr_lst[i], addr_lst) {
  			unsigned long age;
  
 -			if (ifp->flags & IFA_F_PERMANENT)
 +			/* When setting preferred_lft to a value not zero or
 +			 * infinity, while valid_lft is infinity
 +			 * IFA_F_PERMANENT has a non-infinity life time.
 +			 */
 +			if ((ifp->flags & IFA_F_PERMANENT) &&
 +			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
  				continue;
  
  			spin_lock(&ifp->lock);
@@@ -3486,8 -3501,7 +3506,8 @@@
  					ifp->flags |= IFA_F_DEPRECATED;
  				}
  
 -				if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
 +				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
 +				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
  					next = ifp->tstamp + ifp->valid_lft * HZ;
  
  				spin_unlock(&ifp->lock);
@@@ -3503,7 -3517,7 +3523,7 @@@
  				   !(ifp->flags&IFA_F_TENTATIVE)) {
  				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
  					ifp->idev->cnf.dad_transmits *
- 					ifp->idev->nd_parms->retrans_time / HZ;
+ 					NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
  
  				if (age >= ifp->prefered_lft - regen_advance) {
  					struct inet6_ifaddr *ifpub = ifp->ifpub;
@@@ -3578,6 -3592,7 +3598,7 @@@ static const struct nla_policy ifa_ipv6
  	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
  	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
  	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
+ 	[IFA_FLAGS]		= { .len = sizeof(u32) },
  };
  
  static int
@@@ -3601,16 -3616,21 +3622,21 @@@ inet6_rtm_deladdr(struct sk_buff *skb, 
  	return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
  }
  
- static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
+ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
  			     u32 prefered_lft, u32 valid_lft)
  {
  	u32 flags;
  	clock_t expires;
  	unsigned long timeout;
+ 	bool was_managetempaddr;
  
  	if (!valid_lft || (prefered_lft > valid_lft))
  		return -EINVAL;
  
+ 	if (ifa_flags & IFA_F_MANAGETEMPADDR &&
+ 	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
+ 		return -EINVAL;
+ 
  	timeout = addrconf_timeout_fixup(valid_lft, HZ);
  	if (addrconf_finite_timeout(timeout)) {
  		expires = jiffies_to_clock_t(timeout * HZ);
@@@ -3630,7 -3650,10 +3656,10 @@@
  	}
  
  	spin_lock_bh(&ifp->lock);
- 	ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
+ 	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
+ 	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
+ 			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR);
+ 	ifp->flags |= ifa_flags;
  	ifp->tstamp = jiffies;
  	ifp->valid_lft = valid_lft;
  	ifp->prefered_lft = prefered_lft;
@@@ -3641,6 -3664,14 +3670,14 @@@
  
  	addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
  			      expires, flags);
+ 
+ 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
+ 		if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
+ 			valid_lft = prefered_lft = 0;
+ 		manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
+ 				 !was_managetempaddr, jiffies);
+ 	}
+ 
  	addrconf_verify(0);
  
  	return 0;
@@@ -3656,7 -3687,7 +3693,7 @@@ inet6_rtm_newaddr(struct sk_buff *skb, 
  	struct inet6_ifaddr *ifa;
  	struct net_device *dev;
  	u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
- 	u8 ifa_flags;
+ 	u32 ifa_flags;
  	int err;
  
  	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@@ -3683,8 -3714,10 +3720,10 @@@
  	if (dev == NULL)
  		return -ENODEV;
  
+ 	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
+ 
  	/* We ignore other flags so far. */
- 	ifa_flags = ifm->ifa_flags & (IFA_F_NODAD | IFA_F_HOMEADDRESS);
+ 	ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR;
  
  	ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
  	if (ifa == NULL) {
@@@ -3708,7 -3741,7 +3747,7 @@@
  	return err;
  }
  
- static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
+ static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
  			  u8 scope, int ifindex)
  {
  	struct ifaddrmsg *ifm;
@@@ -3751,7 -3784,8 +3790,8 @@@ static inline int inet6_ifaddr_msgsize(
  	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
  	       + nla_total_size(16) /* IFA_LOCAL */
  	       + nla_total_size(16) /* IFA_ADDRESS */
- 	       + nla_total_size(sizeof(struct ifa_cacheinfo));
+ 	       + nla_total_size(sizeof(struct ifa_cacheinfo))
+ 	       + nla_total_size(4)  /* IFA_FLAGS */;
  }
  
  static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
@@@ -3767,8 -3801,7 +3807,8 @@@
  	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
  		      ifa->idev->dev->ifindex);
  
 -	if (!(ifa->flags&IFA_F_PERMANENT)) {
 +	if (!((ifa->flags&IFA_F_PERMANENT) &&
 +	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
  		preferred = ifa->prefered_lft;
  		valid = ifa->valid_lft;
  		if (preferred != INFINITY_LIFE_TIME) {
@@@ -3800,6 -3833,9 +3840,9 @@@
  	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
  		goto error;
  
+ 	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
+ 		goto error;
+ 
  	return nlmsg_end(skb, nlh);
  
  error:
@@@ -4203,7 -4239,7 +4246,7 @@@ static int inet6_fill_ifla6_attrs(struc
  	ci.max_reasm_len = IPV6_MAXPLEN;
  	ci.tstamp = cstamp_delta(idev->tstamp);
  	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
- 	ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
+ 	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
  	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
  		goto nla_put_failure;
  	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
@@@ -4696,6 -4732,46 +4739,46 @@@ int addrconf_sysctl_disable(struct ctl_
  	return ret;
  }
  
+ static
+ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
+ 			      void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	int *valp = ctl->data;
+ 	int ret;
+ 	int old, new;
+ 
+ 	old = *valp;
+ 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ 	new = *valp;
+ 
+ 	if (write && old != new) {
+ 		struct net *net = ctl->extra2;
+ 
+ 		if (!rtnl_trylock())
+ 			return restart_syscall();
+ 
+ 		if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
+ 			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ 						     NETCONFA_IFINDEX_DEFAULT,
+ 						     net->ipv6.devconf_dflt);
+ 		else if (valp == &net->ipv6.devconf_all->proxy_ndp)
+ 			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ 						     NETCONFA_IFINDEX_ALL,
+ 						     net->ipv6.devconf_all);
+ 		else {
+ 			struct inet6_dev *idev = ctl->extra1;
+ 
+ 			inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ 						     idev->dev->ifindex,
+ 						     &idev->cnf);
+ 		}
+ 		rtnl_unlock();
+ 	}
+ 
+ 	return ret;
+ }
+ 
+ 
  static struct addrconf_sysctl_table
  {
  	struct ctl_table_header *sysctl_header;
@@@ -4882,7 -4958,7 +4965,7 @@@
  			.data		= &ipv6_devconf.proxy_ndp,
  			.maxlen		= sizeof(int),
  			.mode		= 0644,
- 			.proc_handler	= proc_dointvec,
+ 			.proc_handler	= addrconf_sysctl_proxy_ndp,
  		},
  		{
  			.procname	= "accept_source_route",
@@@ -4998,7 -5074,7 +5081,7 @@@ static void __addrconf_sysctl_unregiste
  
  static void addrconf_sysctl_register(struct inet6_dev *idev)
  {
- 	neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
+ 	neigh_sysctl_register(idev->dev, idev->nd_parms,
  			      &ndisc_ifinfo_sysctl_change);
  	__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
  					idev, &idev->cnf);
@@@ -5131,9 -5207,7 +5214,7 @@@ int __init addrconf_init(void
  
  	addrconf_verify(0);
  
- 	err = rtnl_af_register(&inet6_ops);
- 	if (err < 0)
- 		goto errout_af;
+ 	rtnl_af_register(&inet6_ops);
  
  	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
  			      NULL);
@@@ -5157,7 -5231,6 +5238,6 @@@
  	return 0;
  errout:
  	rtnl_af_unregister(&inet6_ops);
- errout_af:
  	unregister_netdevice_notifier(&ipv6_dev_notf);
  errlo:
  	unregister_pernet_subsys(&addrconf_ops);
diff --combined net/ipv6/ip6_output.c
index e6f9319,788c01a..d1de956
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@@ -336,7 -336,8 +336,8 @@@ int ip6_forward(struct sk_buff *skb
  		goto drop;
  
  	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
- 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_INDISCARDS);
  		goto drop;
  	}
  
@@@ -370,8 -371,8 +371,8 @@@
  		/* Force OUTPUT device used as source address */
  		skb->dev = dst->dev;
  		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
- 		IP6_INC_STATS_BH(net,
- 				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_INHDRERRORS);
  
  		kfree_skb(skb);
  		return -ETIMEDOUT;
@@@ -384,14 -385,15 +385,15 @@@
  		if (proxied > 0)
  			return ip6_input(skb);
  		else if (proxied < 0) {
- 			IP6_INC_STATS(net, ip6_dst_idev(dst),
- 				      IPSTATS_MIB_INDISCARDS);
+ 			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 					 IPSTATS_MIB_INDISCARDS);
  			goto drop;
  		}
  	}
  
  	if (!xfrm6_route_forward(skb)) {
- 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_INDISCARDS);
  		goto drop;
  	}
  	dst = skb_dst(skb);
@@@ -448,16 -450,17 +450,17 @@@
  		/* Again, force OUTPUT device used as source address */
  		skb->dev = dst->dev;
  		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- 		IP6_INC_STATS_BH(net,
- 				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
- 		IP6_INC_STATS_BH(net,
- 				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_INTOOBIGERRORS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_FRAGFAILS);
  		kfree_skb(skb);
  		return -EMSGSIZE;
  	}
  
  	if (skb_cow(skb, dst->dev->hard_header_len)) {
- 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+ 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ 				 IPSTATS_MIB_OUTDISCARDS);
  		goto drop;
  	}
  
@@@ -938,7 -941,6 +941,6 @@@ EXPORT_SYMBOL_GPL(ip6_dst_lookup)
   *	@sk: socket which provides route info
   *	@fl6: flow to lookup
   *	@final_dst: final destination address for ipsec lookup
-  *	@can_sleep: we are in a sleepable context
   *
   *	This function performs a route lookup on the given flow.
   *
@@@ -946,8 -948,7 +948,7 @@@
   *	error code.
   */
  struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- 				      const struct in6_addr *final_dst,
- 				      bool can_sleep)
+ 				      const struct in6_addr *final_dst)
  {
  	struct dst_entry *dst = NULL;
  	int err;
@@@ -957,8 -958,6 +958,6 @@@
  		return ERR_PTR(err);
  	if (final_dst)
  		fl6->daddr = *final_dst;
- 	if (can_sleep)
- 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
  
  	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
  }
@@@ -969,7 -968,6 +968,6 @@@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow)
   *	@sk: socket which provides the dst cache and route info
   *	@fl6: flow to lookup
   *	@final_dst: final destination address for ipsec lookup
-  *	@can_sleep: we are in a sleepable context
   *
   *	This function performs a route lookup on the given flow with the
   *	possibility of using the cached route in the socket if it is valid.
@@@ -980,8 -978,7 +978,7 @@@
   *	error code.
   */
  struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- 					 const struct in6_addr *final_dst,
- 					 bool can_sleep)
+ 					 const struct in6_addr *final_dst)
  {
  	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
  	int err;
@@@ -993,8 -990,6 +990,6 @@@
  		return ERR_PTR(err);
  	if (final_dst)
  		fl6->daddr = *final_dst;
- 	if (can_sleep)
- 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
  
  	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
  }
@@@ -1162,10 -1157,10 +1157,10 @@@ int ip6_append_data(struct sock *sk, in
  		np->cork.hop_limit = hlimit;
  		np->cork.tclass = tclass;
  		if (rt->dst.flags & DST_XFRM_TUNNEL)
- 			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ 			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
  			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
  		else
- 			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ 			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
  			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
  		if (np->frag_size < mtu) {
  			if (np->frag_size)
@@@ -1193,35 -1188,11 +1188,35 @@@
  
  	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
  			(opt ? opt->opt_nflen : 0);
 -	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
 +	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
 +		     sizeof(struct frag_hdr);
  
  	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
 -		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
 -			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
 +		unsigned int maxnonfragsize, headersize;
 +
 +		headersize = sizeof(struct ipv6hdr) +
 +			     (opt ? opt->tot_len : 0) +
 +			     (dst_allfrag(&rt->dst) ?
 +			      sizeof(struct frag_hdr) : 0) +
 +			     rt->rt6i_nfheader_len;
 +
 +		maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
 +				 mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
 +
 +		/* dontfrag active */
 +		if ((cork->length + length > mtu - headersize) && dontfrag &&
 +		    (sk->sk_protocol == IPPROTO_UDP ||
 +		     sk->sk_protocol == IPPROTO_RAW)) {
 +			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
 +						   sizeof(struct ipv6hdr));
 +			goto emsgsize;
 +		}
 +
 +		if (cork->length + length > maxnonfragsize - headersize) {
 +emsgsize:
 +			ipv6_local_error(sk, EMSGSIZE, fl6,
 +					 mtu - headersize +
 +					 sizeof(struct ipv6hdr));
  			return -EMSGSIZE;
  		}
  	}
@@@ -1246,6 -1217,12 +1241,6 @@@
  	 * --yoshfuji
  	 */
  
 -	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
 -					   sk->sk_protocol == IPPROTO_RAW)) {
 -		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
 -		return -EMSGSIZE;
 -	}
 -
  	skb = skb_peek_tail(&sk->sk_write_queue);
  	cork->length += length;
  	if (((length > mtu) ||
@@@ -1285,7 -1262,7 +1280,7 @@@ alloc_new_skb
  			if (skb == NULL || skb_prev == NULL)
  				ip6_append_data_mtu(&mtu, &maxfraglen,
  						    fragheaderlen, skb, rt,
- 						    np->pmtudisc ==
+ 						    np->pmtudisc >=
  						    IPV6_PMTUDISC_PROBE);
  
  			skb_prev = skb;
diff --combined net/ipv6/ip6_tunnel.c
index 7881965,0289421..1e5e240
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@@ -29,7 -29,6 +29,6 @@@
  #include <linux/if.h>
  #include <linux/in.h>
  #include <linux/ip.h>
- #include <linux/if_tunnel.h>
  #include <linux/net.h>
  #include <linux/in6.h>
  #include <linux/netdevice.h>
@@@ -70,7 -69,6 +69,6 @@@ MODULE_ALIAS_NETDEV("ip6tnl0")
  #define IP6_TNL_TRACE(x...) do {;} while(0)
  #endif
  
- #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
  #define IPV6_TCLASS_SHIFT 20
  
  #define HASH_SIZE_SHIFT  5
@@@ -103,25 -101,17 +101,26 @@@ struct ip6_tnl_net 
  
  static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  {
- 	struct pcpu_tstats tmp, sum = { 0 };
 -	struct pcpu_sw_netstats sum = { 0 };
++	struct pcpu_sw_netstats tmp, sum = { 0 };
  	int i;
  
  	for_each_possible_cpu(i) {
 +		unsigned int start;
- 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+ 		const struct pcpu_sw_netstats *tstats =
+ 						   per_cpu_ptr(dev->tstats, i);
  
 -		sum.rx_packets += tstats->rx_packets;
 -		sum.rx_bytes   += tstats->rx_bytes;
 -		sum.tx_packets += tstats->tx_packets;
 -		sum.tx_bytes   += tstats->tx_bytes;
 +		do {
 +			start = u64_stats_fetch_begin_bh(&tstats->syncp);
 +			tmp.rx_packets = tstats->rx_packets;
 +			tmp.rx_bytes = tstats->rx_bytes;
 +			tmp.tx_packets = tstats->tx_packets;
 +			tmp.tx_bytes =  tstats->tx_bytes;
 +		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
 +
 +		sum.rx_packets += tmp.rx_packets;
 +		sum.rx_bytes   += tmp.rx_bytes;
 +		sum.tx_packets += tmp.tx_packets;
 +		sum.tx_bytes   += tmp.tx_bytes;
  	}
  	dev->stats.rx_packets = sum.rx_packets;
  	dev->stats.rx_bytes   = sum.rx_bytes;
@@@ -794,7 -784,7 +793,7 @@@ static int ip6_tnl_rcv(struct sk_buff *
  
  	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
  					&ipv6h->daddr)) != NULL) {
- 		struct pcpu_tstats *tstats;
+ 		struct pcpu_sw_netstats *tstats;
  
  		if (t->parms.proto != ipproto && t->parms.proto != 0) {
  			rcu_read_unlock();
@@@ -833,10 -823,8 +832,10 @@@
  		}
  
  		tstats = this_cpu_ptr(t->dev->tstats);
 +		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
 +		u64_stats_update_end(&tstats->syncp);
  
  		netif_rx(skb);
  
@@@ -1142,7 -1130,7 +1141,7 @@@ ip6ip6_tnl_xmit(struct sk_buff *skb, st
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
  		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- 		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+ 		fl6.flowlabel |= ip6_flowlabel(ipv6h);
  	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
  		fl6.flowi6_mark = skb->mark;
  
@@@ -1509,12 -1497,12 +1508,12 @@@ ip6_tnl_dev_init_gen(struct net_device 
  
  	t->dev = dev;
  	t->net = dev_net(dev);
- 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+ 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
- 		struct pcpu_tstats *ip6_tnl_stats;
+ 		struct pcpu_sw_netstats *ip6_tnl_stats;
  		ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ip6_tnl_stats->syncp);
  	}
diff --combined net/ipv6/ip6_vti.c
index a4564b0,da1d9e4..b50acd5
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@@ -24,7 -24,6 +24,6 @@@
  #include <linux/if.h>
  #include <linux/in.h>
  #include <linux/ip.h>
- #include <linux/if_tunnel.h>
  #include <linux/net.h>
  #include <linux/in6.h>
  #include <linux/netdevice.h>
@@@ -75,6 -74,27 +74,6 @@@ struct vti6_net 
  	struct ip6_tnl __rcu **tnls[2];
  };
  
 -static struct net_device_stats *vti6_get_stats(struct net_device *dev)
 -{
 -	struct pcpu_sw_netstats sum = { 0 };
 -	int i;
 -
 -	for_each_possible_cpu(i) {
 -		const struct pcpu_sw_netstats *tstats =
 -						   per_cpu_ptr(dev->tstats, i);
 -
 -		sum.rx_packets += tstats->rx_packets;
 -		sum.rx_bytes   += tstats->rx_bytes;
 -		sum.tx_packets += tstats->tx_packets;
 -		sum.tx_bytes   += tstats->tx_bytes;
 -	}
 -	dev->stats.rx_packets = sum.rx_packets;
 -	dev->stats.rx_bytes   = sum.rx_bytes;
 -	dev->stats.tx_packets = sum.tx_packets;
 -	dev->stats.tx_bytes   = sum.tx_bytes;
 -	return &dev->stats;
 -}
 -
  #define for_each_vti6_tunnel_rcu(start) \
  	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
  
@@@ -292,7 -312,7 +291,7 @@@ static int vti6_rcv(struct sk_buff *skb
  
  	if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
  				 &ipv6h->daddr)) != NULL) {
- 		struct pcpu_tstats *tstats;
+ 		struct pcpu_sw_netstats *tstats;
  
  		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
  			rcu_read_unlock();
@@@ -311,10 -331,8 +310,10 @@@
  		}
  
  		tstats = this_cpu_ptr(t->dev->tstats);
 +		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
 +		u64_stats_update_end(&tstats->syncp);
  
  		skb->mark = 0;
  		secpath_reset(skb);
@@@ -698,7 -716,7 +697,7 @@@ static const struct net_device_ops vti6
  	.ndo_start_xmit = vti6_tnl_xmit,
  	.ndo_do_ioctl	= vti6_ioctl,
  	.ndo_change_mtu = vti6_change_mtu,
 -	.ndo_get_stats	= vti6_get_stats,
 +	.ndo_get_stats64 = ip_tunnel_get_stats64,
  };
  
  /**
@@@ -735,7 -753,7 +734,7 @@@ static inline int vti6_dev_init_gen(str
  
  	t->dev = dev;
  	t->net = dev_net(dev);
- 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+ 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  	return 0;
diff --combined net/ipv6/route.c
index 4b4944c,266f110..11dac21
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -66,8 -66,9 +66,9 @@@
  #endif
  
  enum rt6_nud_state {
- 	RT6_NUD_FAIL_HARD = -2,
- 	RT6_NUD_FAIL_SOFT = -1,
+ 	RT6_NUD_FAIL_HARD = -3,
+ 	RT6_NUD_FAIL_PROBE = -2,
+ 	RT6_NUD_FAIL_DO_RR = -1,
  	RT6_NUD_SUCCEED = 1
  };
  
@@@ -103,6 -104,36 +104,36 @@@ static struct rt6_info *rt6_get_route_i
  					   const struct in6_addr *gwaddr, int ifindex);
  #endif
  
+ static void rt6_bind_peer(struct rt6_info *rt, int create)
+ {
+ 	struct inet_peer_base *base;
+ 	struct inet_peer *peer;
+ 
+ 	base = inetpeer_base_ptr(rt->_rt6i_peer);
+ 	if (!base)
+ 		return;
+ 
+ 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
+ 	if (peer) {
+ 		if (!rt6_set_peer(rt, peer))
+ 			inet_putpeer(peer);
+ 	}
+ }
+ 
+ static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+ {
+ 	if (rt6_has_peer(rt))
+ 		return rt6_peer_ptr(rt);
+ 
+ 	rt6_bind_peer(rt, create);
+ 	return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+ }
+ 
+ static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+ {
+ 	return __rt6_get_peer(rt, 1);
+ }
+ 
  static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
  {
  	struct rt6_info *rt = (struct rt6_info *) dst;
@@@ -311,22 -342,6 +342,6 @@@ static void ip6_dst_destroy(struct dst_
  	}
  }
  
- void rt6_bind_peer(struct rt6_info *rt, int create)
- {
- 	struct inet_peer_base *base;
- 	struct inet_peer *peer;
- 
- 	base = inetpeer_base_ptr(rt->_rt6i_peer);
- 	if (!base)
- 		return;
- 
- 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
- 	if (peer) {
- 		if (!rt6_set_peer(rt, peer))
- 			inet_putpeer(peer);
- 	}
- }
- 
  static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
  			   int how)
  {
@@@ -521,7 -536,7 +536,7 @@@ static void rt6_probe(struct rt6_info *
  		work = kmalloc(sizeof(*work), GFP_ATOMIC);
  
  		if (neigh && work)
- 			neigh->updated = jiffies;
+ 			__neigh_set_probe_once(neigh);
  
  		if (neigh)
  			write_unlock(&neigh->lock);
@@@ -577,11 -592,13 +592,13 @@@ static inline enum rt6_nud_state rt6_ch
  #ifdef CONFIG_IPV6_ROUTER_PREF
  		else if (!(neigh->nud_state & NUD_FAILED))
  			ret = RT6_NUD_SUCCEED;
+ 		else
+ 			ret = RT6_NUD_FAIL_PROBE;
  #endif
  		read_unlock(&neigh->lock);
  	} else {
  		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
- 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
+ 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
  	}
  	rcu_read_unlock_bh();
  
@@@ -618,16 -635,17 +635,17 @@@ static struct rt6_info *find_match(stru
  		goto out;
  
  	m = rt6_score_route(rt, oif, strict);
- 	if (m == RT6_NUD_FAIL_SOFT) {
+ 	if (m == RT6_NUD_FAIL_DO_RR) {
  		match_do_rr = true;
  		m = 0; /* lowest valid score */
- 	} else if (m < 0) {
+ 	} else if (m == RT6_NUD_FAIL_HARD) {
  		goto out;
  	}
  
  	if (strict & RT6_LOOKUP_F_REACHABLE)
  		rt6_probe(rt);
  
+ 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
  	if (m > *mpri) {
  		*do_rr = match_do_rr;
  		*mpri = m;
@@@ -1905,7 -1923,9 +1923,7 @@@ static struct rt6_info *ip6_rt_copy(str
  		else
  			rt->rt6i_gateway = *dest;
  		rt->rt6i_flags = ort->rt6i_flags;
 -		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
 -		    (RTF_DEFAULT | RTF_ADDRCONF))
 -			rt6_set_from(rt, ort);
 +		rt6_set_from(rt, ort);
  		rt->rt6i_metric = 0;
  
  #ifdef CONFIG_IPV6_SUBTREES
@@@ -2238,7 -2258,7 +2256,7 @@@ void rt6_remove_prefsrc(struct inet6_if
  		.net = net,
  		.addr = &ifp->addr,
  	};
- 	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
+ 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
  }
  
  struct arg_dev_net {
@@@ -2265,7 -2285,7 +2283,7 @@@ void rt6_ifdown(struct net *net, struc
  		.net = net,
  	};
  
- 	fib6_clean_all(net, fib6_ifdown, 0, &adn);
+ 	fib6_clean_all(net, fib6_ifdown, &adn);
  	icmp6_clean_all(fib6_ifdown, &adn);
  }
  
@@@ -2320,7 -2340,7 +2338,7 @@@ void rt6_mtu_change(struct net_device *
  		.mtu = mtu,
  	};
  
- 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
+ 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
  }
  
  static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
diff --combined net/ipv6/sit.c
index d3005b3,9937b26..3dfbcf1
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@@ -671,7 -671,7 +671,7 @@@ static int ipip6_rcv(struct sk_buff *sk
  	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
  				     iph->saddr, iph->daddr);
  	if (tunnel != NULL) {
- 		struct pcpu_tstats *tstats;
+ 		struct pcpu_sw_netstats *tstats;
  
  		if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
  		    tunnel->parms.iph.protocol != 0)
@@@ -702,10 -702,8 +702,10 @@@
  		}
  
  		tstats = this_cpu_ptr(tunnel->dev->tstats);
 +		u64_stats_update_begin(&tstats->syncp);
  		tstats->rx_packets++;
  		tstats->rx_bytes += skb->len;
 +		u64_stats_update_end(&tstats->syncp);
  
  		netif_rx(skb);
  
@@@ -926,7 -924,7 +926,7 @@@ static netdev_tx_t ipip6_tunnel_xmit(st
  		if (tunnel->parms.iph.daddr && skb_dst(skb))
  			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
  
 -		if (skb->len > mtu) {
 +		if (skb->len > mtu && !skb_is_gso(skb)) {
  			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
  			ip_rt_put(rt);
  			goto tx_error;
@@@ -968,10 -966,8 +968,10 @@@
  	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
  
  	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
 -	if (IS_ERR(skb))
 +	if (IS_ERR(skb)) {
 +		ip_rt_put(rt);
  		goto out;
 +	}
  
  	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
  			    ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@@ -1365,12 -1361,12 +1365,12 @@@ static int ipip6_tunnel_init(struct net
  	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
  
  	ipip6_tunnel_bind_dev(dev);
- 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+ 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
- 		struct pcpu_tstats *ipip6_tunnel_stats;
+ 		struct pcpu_sw_netstats *ipip6_tunnel_stats;
  		ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ipip6_tunnel_stats->syncp);
  	}
@@@ -1395,12 -1391,12 +1395,12 @@@ static int __net_init ipip6_fb_tunnel_i
  	iph->ihl		= 5;
  	iph->ttl		= 64;
  
- 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+ 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
  	if (!dev->tstats)
  		return -ENOMEM;
  
  	for_each_possible_cpu(i) {
- 		struct pcpu_tstats *ipip6_fb_stats;
+ 		struct pcpu_sw_netstats *ipip6_fb_stats;
  		ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
  		u64_stats_init(&ipip6_fb_stats->syncp);
  	}
diff --combined net/netfilter/ipvs/ip_vs_nfct.c
index 5a355a4,d5f4151..5882bbf
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@@ -19,8 -19,7 +19,7 @@@
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   *
   *
   * Authors:
@@@ -63,7 -62,6 +62,7 @@@
  #include <net/ip_vs.h>
  #include <net/netfilter/nf_conntrack_core.h>
  #include <net/netfilter/nf_conntrack_expect.h>
 +#include <net/netfilter/nf_conntrack_seqadj.h>
  #include <net/netfilter/nf_conntrack_helper.h>
  #include <net/netfilter/nf_conntrack_zones.h>
  
@@@ -98,11 -96,6 +97,11 @@@ ip_vs_update_conntrack(struct sk_buff *
  	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
  		return;
  
 +	/* Applications may adjust TCP seqs */
 +	if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
 +	    !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
 +		return;
 +
  	/*
  	 * The connection is not yet in the hashtable, so we update it.
  	 * CIP->VIP will remain the same, so leave the tuple in
diff --combined net/rose/af_rose.c
index 62ced65,81f94b1..d080eb4
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@@ -1012,7 -1012,7 +1012,7 @@@ int rose_rx_call_request(struct sk_buf
  	make_rose->source_call   = facilities.source_call;
  	make_rose->source_ndigis = facilities.source_ndigis;
  	for (n = 0 ; n < facilities.source_ndigis ; n++)
- 		make_rose->source_digis[n]= facilities.source_digis[n];
+ 		make_rose->source_digis[n] = facilities.source_digis[n];
  	make_rose->neighbour     = neigh;
  	make_rose->device        = dev;
  	make_rose->facilities    = facilities;
@@@ -1253,7 -1253,6 +1253,7 @@@ static int rose_recvmsg(struct kiocb *i
  
  	if (msg->msg_name) {
  		struct sockaddr_rose *srose;
 +		struct full_sockaddr_rose *full_srose = msg->msg_name;
  
  		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
  		srose = msg->msg_name;
@@@ -1261,9 -1260,18 +1261,9 @@@
  		srose->srose_addr   = rose->dest_addr;
  		srose->srose_call   = rose->dest_call;
  		srose->srose_ndigis = rose->dest_ndigis;
 -		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
 -			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
 -			for (n = 0 ; n < rose->dest_ndigis ; n++)
 -				full_srose->srose_digis[n] = rose->dest_digis[n];
 -			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
 -		} else {
 -			if (rose->dest_ndigis >= 1) {
 -				srose->srose_ndigis = 1;
 -				srose->srose_digi = rose->dest_digis[0];
 -			}
 -			msg->msg_namelen = sizeof(struct sockaddr_rose);
 -		}
 +		for (n = 0 ; n < rose->dest_ndigis ; n++)
 +			full_srose->srose_digis[n] = rose->dest_digis[n];
 +		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
  	}
  
  	skb_free_datagram(sk, skb);
diff --combined net/sched/act_csum.c
index 11fe1a4,9cc6717..8b1d657
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@@ -37,15 -37,8 +37,8 @@@
  #include <net/tc_act/tc_csum.h>
  
  #define CSUM_TAB_MASK 15
- static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
  static u32 csum_idx_gen;
- static DEFINE_RWLOCK(csum_lock);
- 
- static struct tcf_hashinfo csum_hash_info = {
- 	.htab	= tcf_csum_ht,
- 	.hmask	= CSUM_TAB_MASK,
- 	.lock	= &csum_lock,
- };
+ static struct tcf_hashinfo csum_hash_info;
  
  static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
  	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@@ -77,16 -70,16 +70,16 @@@ static int tcf_csum_init(struct net *n
  				     &csum_idx_gen, &csum_hash_info);
  		if (IS_ERR(pc))
  			return PTR_ERR(pc);
 -		p = to_tcf_csum(pc);
  		ret = ACT_P_CREATED;
  	} else {
 -		p = to_tcf_csum(pc);
 -		if (!ovr) {
 -			tcf_hash_release(pc, bind, &csum_hash_info);
 +		if (bind)/* dont override defaults */
 +			return 0;
 +		tcf_hash_release(pc, bind, &csum_hash_info);
 +		if (!ovr)
  			return -EEXIST;
 -		}
  	}
  
 +	p = to_tcf_csum(pc);
  	spin_lock_bh(&p->tcf_lock);
  	p->tcf_action = parm->action;
  	p->update_flags = parm->update_flags;
@@@ -593,6 -586,10 +586,10 @@@ MODULE_LICENSE("GPL")
  
  static int __init csum_init_module(void)
  {
+ 	int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
+ 	if (err)
+ 		return err;
+ 
  	return tcf_register_action(&act_csum_ops);
  }
  
diff --combined net/sched/act_gact.c
index eb9ba60,dea9273..af5641c
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@@ -24,15 -24,8 +24,8 @@@
  #include <net/tc_act/tc_gact.h>
  
  #define GACT_TAB_MASK	15
- static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1];
  static u32 gact_idx_gen;
- static DEFINE_RWLOCK(gact_lock);
- 
- static struct tcf_hashinfo gact_hash_info = {
- 	.htab	=	tcf_gact_ht,
- 	.hmask	=	GACT_TAB_MASK,
- 	.lock	=	&gact_lock,
- };
+ static struct tcf_hashinfo gact_hash_info;
  
  #ifdef CONFIG_GACT_PROB
  static int gact_net_rand(struct tcf_gact *gact)
@@@ -102,11 -95,10 +95,11 @@@ static int tcf_gact_init(struct net *ne
  			return PTR_ERR(pc);
  		ret = ACT_P_CREATED;
  	} else {
 -		if (!ovr) {
 -			tcf_hash_release(pc, bind, &gact_hash_info);
 +		if (bind)/* dont override defaults */
 +			return 0;
 +		tcf_hash_release(pc, bind, &gact_hash_info);
 +		if (!ovr)
  			return -EEXIST;
 -		}
  	}
  
  	gact = to_gact(pc);
@@@ -216,6 -208,9 +209,9 @@@ MODULE_LICENSE("GPL")
  
  static int __init gact_init_module(void)
  {
+ 	int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
+ 	if (err)
+ 		return err;
  #ifdef CONFIG_GACT_PROB
  	pr_info("GACT probability on\n");
  #else
@@@ -227,6 -222,7 +223,7 @@@
  static void __exit gact_cleanup_module(void)
  {
  	tcf_unregister_action(&act_gact_ops);
+ 	tcf_hashinfo_destroy(&gact_hash_info);
  }
  
  module_init(gact_init_module);
diff --combined net/sched/act_ipt.c
index dcbfe8c,e13ecbb..2426369
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@@ -29,15 -29,8 +29,8 @@@
  
  
  #define IPT_TAB_MASK     15
- static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
  static u32 ipt_idx_gen;
- static DEFINE_RWLOCK(ipt_lock);
- 
- static struct tcf_hashinfo ipt_hash_info = {
- 	.htab	=	tcf_ipt_ht,
- 	.hmask	=	IPT_TAB_MASK,
- 	.lock	=	&ipt_lock,
- };
+ static struct tcf_hashinfo ipt_hash_info;
  
  static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
  {
@@@ -141,12 -134,10 +134,12 @@@ static int tcf_ipt_init(struct net *net
  			return PTR_ERR(pc);
  		ret = ACT_P_CREATED;
  	} else {
 -		if (!ovr) {
 -			tcf_ipt_release(to_ipt(pc), bind);
 +		if (bind)/* dont override defaults */
 +			return 0;
 +		tcf_ipt_release(to_ipt(pc), bind);
 +
 +		if (!ovr)
  			return -EEXIST;
 -		}
  	}
  	ipt = to_ipt(pc);
  
@@@ -322,7 -313,11 +315,11 @@@ MODULE_ALIAS("act_xt")
  
  static int __init ipt_init_module(void)
  {
- 	int ret1, ret2;
+ 	int ret1, ret2, err;
+ 	err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
+ 	if (err)
+ 		return err;
+ 
  	ret1 = tcf_register_action(&act_xt_ops);
  	if (ret1 < 0)
  		printk("Failed to load xt action\n");
@@@ -330,9 -325,10 +327,10 @@@
  	if (ret2 < 0)
  		printk("Failed to load ipt action\n");
  
- 	if (ret1 < 0 && ret2 < 0)
+ 	if (ret1 < 0 && ret2 < 0) {
+ 		tcf_hashinfo_destroy(&ipt_hash_info);
  		return ret1;
- 	else
+ 	} else
  		return 0;
  }
  
@@@ -340,6 -336,7 +338,7 @@@ static void __exit ipt_cleanup_module(v
  {
  	tcf_unregister_action(&act_xt_ops);
  	tcf_unregister_action(&act_ipt_ops);
+ 	tcf_hashinfo_destroy(&ipt_hash_info);
  }
  
  module_init(ipt_init_module);
diff --combined net/sched/act_nat.c
index 7686953,921fea4..584e655
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@@ -30,15 -30,9 +30,9 @@@
  
  
  #define NAT_TAB_MASK	15
- static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
  static u32 nat_idx_gen;
- static DEFINE_RWLOCK(nat_lock);
  
- static struct tcf_hashinfo nat_hash_info = {
- 	.htab	=	tcf_nat_ht,
- 	.hmask	=	NAT_TAB_MASK,
- 	.lock	=	&nat_lock,
- };
+ static struct tcf_hashinfo nat_hash_info;
  
  static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
  	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
@@@ -70,15 -64,15 +64,15 @@@ static int tcf_nat_init(struct net *net
  				     &nat_idx_gen, &nat_hash_info);
  		if (IS_ERR(pc))
  			return PTR_ERR(pc);
 -		p = to_tcf_nat(pc);
  		ret = ACT_P_CREATED;
  	} else {
 -		p = to_tcf_nat(pc);
 -		if (!ovr) {
 -			tcf_hash_release(pc, bind, &nat_hash_info);
 +		if (bind)
 +			return 0;
 +		tcf_hash_release(pc, bind, &nat_hash_info);
 +		if (!ovr)
  			return -EEXIST;
 -		}
  	}
 +	p = to_tcf_nat(pc);
  
  	spin_lock_bh(&p->tcf_lock);
  	p->old_addr = parm->old_addr;
@@@ -316,12 -310,16 +310,16 @@@ MODULE_LICENSE("GPL")
  
  static int __init nat_init_module(void)
  {
+ 	int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
+ 	if (err)
+ 		return err;
  	return tcf_register_action(&act_nat_ops);
  }
  
  static void __exit nat_cleanup_module(void)
  {
  	tcf_unregister_action(&act_nat_ops);
+ 	tcf_hashinfo_destroy(&nat_hash_info);
  }
  
  module_init(nat_init_module);
diff --combined net/sched/act_pedit.c
index 7aa2dcd,e2520e9..7291893
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@@ -24,15 -24,9 +24,9 @@@
  #include <net/tc_act/tc_pedit.h>
  
  #define PEDIT_TAB_MASK	15
- static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1];
  static u32 pedit_idx_gen;
- static DEFINE_RWLOCK(pedit_lock);
  
- static struct tcf_hashinfo pedit_hash_info = {
- 	.htab	=	tcf_pedit_ht,
- 	.hmask	=	PEDIT_TAB_MASK,
- 	.lock	=	&pedit_lock,
- };
+ static struct tcf_hashinfo pedit_hash_info;
  
  static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
  	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tc_pedit) },
@@@ -84,12 -78,10 +78,12 @@@ static int tcf_pedit_init(struct net *n
  		ret = ACT_P_CREATED;
  	} else {
  		p = to_pedit(pc);
 -		if (!ovr) {
 -			tcf_hash_release(pc, bind, &pedit_hash_info);
 +		tcf_hash_release(pc, bind, &pedit_hash_info);
 +		if (bind)
 +			return 0;
 +		if (!ovr)
  			return -EEXIST;
 -		}
 +
  		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
  			keys = kmalloc(ksize, GFP_KERNEL);
  			if (keys == NULL)
@@@ -254,11 -246,15 +248,15 @@@ MODULE_LICENSE("GPL")
  
  static int __init pedit_init_module(void)
  {
+ 	int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
+ 	if (err)
+ 		return err;
  	return tcf_register_action(&act_pedit_ops);
  }
  
  static void __exit pedit_cleanup_module(void)
  {
+ 	tcf_hashinfo_destroy(&pedit_hash_info);
  	tcf_unregister_action(&act_pedit_ops);
  }
  
diff --combined net/sched/act_police.c
index ef246d8,819a9a4..9295b86
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@@ -41,15 -41,8 +41,8 @@@ struct tcf_police 
  	container_of(pc, struct tcf_police, common)
  
  #define POL_TAB_MASK     15
- static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
  static u32 police_idx_gen;
- static DEFINE_RWLOCK(police_lock);
- 
- static struct tcf_hashinfo police_hash_info = {
- 	.htab	=	tcf_police_ht,
- 	.hmask	=	POL_TAB_MASK,
- 	.lock	=	&police_lock,
- };
+ static struct tcf_hashinfo police_hash_info;
  
  /* old policer structure from before tc actions */
  struct tc_police_compat {
@@@ -67,18 -60,19 +60,19 @@@
  static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
  			      int type, struct tc_action *a)
  {
+ 	struct hlist_head *head;
  	struct tcf_common *p;
  	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
  	struct nlattr *nest;
  
- 	read_lock_bh(&police_lock);
+ 	spin_lock_bh(&police_hash_info.lock);
  
  	s_i = cb->args[0];
  
  	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
- 		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
+ 		head = &police_hash_info.htab[tcf_hash(i, POL_TAB_MASK)];
  
- 		for (; p; p = p->tcfc_next) {
+ 		hlist_for_each_entry_rcu(p, head, tcfc_head) {
  			index++;
  			if (index < s_i)
  				continue;
@@@ -101,7 -95,7 +95,7 @@@
  		}
  	}
  done:
- 	read_unlock_bh(&police_lock);
+ 	spin_unlock_bh(&police_hash_info.lock);
  	if (n_i)
  		cb->args[0] += n_i;
  	return n_i;
@@@ -113,25 -107,16 +107,16 @@@ nla_put_failure
  
  static void tcf_police_destroy(struct tcf_police *p)
  {
- 	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
- 	struct tcf_common **p1p;
- 
- 	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
- 		if (*p1p == &p->common) {
- 			write_lock_bh(&police_lock);
- 			*p1p = p->tcf_next;
- 			write_unlock_bh(&police_lock);
- 			gen_kill_estimator(&p->tcf_bstats,
- 					   &p->tcf_rate_est);
- 			/*
- 			 * gen_estimator est_timer() might access p->tcf_lock
- 			 * or bstats, wait a RCU grace period before freeing p
- 			 */
- 			kfree_rcu(p, tcf_rcu);
- 			return;
- 		}
- 	}
- 	WARN_ON(1);
+ 	spin_lock_bh(&police_hash_info.lock);
+ 	hlist_del(&p->tcf_head);
+ 	spin_unlock_bh(&police_hash_info.lock);
+ 	gen_kill_estimator(&p->tcf_bstats,
+ 			   &p->tcf_rate_est);
+ 	/*
+ 	 * gen_estimator est_timer() might access p->tcf_lock
+ 	 * or bstats, wait a RCU grace period before freeing p
+ 	 */
+ 	kfree_rcu(p, tcf_rcu);
  }
  
  static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@@ -177,12 -162,10 +162,12 @@@ static int tcf_act_police_locate(struc
  			if (bind) {
  				police->tcf_bindcnt += 1;
  				police->tcf_refcnt += 1;
 +				return 0;
  			}
  			if (ovr)
  				goto override;
 -			return ret;
 +			/* not replacing */
 +			return -EEXIST;
  		}
  	}
  
@@@ -268,10 -251,9 +253,9 @@@ override
  	police->tcf_index = parm->index ? parm->index :
  		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
  	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
- 	write_lock_bh(&police_lock);
- 	police->tcf_next = tcf_police_ht[h];
- 	tcf_police_ht[h] = &police->common;
- 	write_unlock_bh(&police_lock);
+ 	spin_lock_bh(&police_hash_info.lock);
+ 	hlist_add_head(&police->tcf_head, &police_hash_info.htab[h]);
+ 	spin_unlock_bh(&police_hash_info.lock);
  
  	a->priv = police;
  	return ret;
@@@ -279,10 -261,8 +263,8 @@@
  failure_unlock:
  	spin_unlock_bh(&police->tcf_lock);
  failure:
- 	if (P_tab)
- 		qdisc_put_rtab(P_tab);
- 	if (R_tab)
- 		qdisc_put_rtab(R_tab);
+ 	qdisc_put_rtab(P_tab);
+ 	qdisc_put_rtab(R_tab);
  	if (ret == ACT_P_CREATED)
  		kfree(police);
  	return err;
@@@ -416,12 -396,19 +398,19 @@@ static struct tc_action_ops act_police_
  static int __init
  police_init_module(void)
  {
- 	return tcf_register_action(&act_police_ops);
+ 	int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
+ 	if (err)
+ 		return err;
+ 	err = tcf_register_action(&act_police_ops);
+ 	if (err)
+ 		tcf_hashinfo_destroy(&police_hash_info);
+ 	return err;
  }
  
  static void __exit
  police_cleanup_module(void)
  {
+ 	tcf_hashinfo_destroy(&police_hash_info);
  	tcf_unregister_action(&act_police_ops);
  }
  
diff --combined net/sched/act_simple.c
index f7b45ab,81aebc1..b44491e
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@@ -25,15 -25,8 +25,8 @@@
  #include <net/tc_act/tc_defact.h>
  
  #define SIMP_TAB_MASK     7
- static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1];
  static u32 simp_idx_gen;
- static DEFINE_RWLOCK(simp_lock);
- 
- static struct tcf_hashinfo simp_hash_info = {
- 	.htab	=	tcf_simp_ht,
- 	.hmask	=	SIMP_TAB_MASK,
- 	.lock	=	&simp_lock,
- };
+ static struct tcf_hashinfo simp_hash_info;
  
  #define SIMP_MAX_DATA	32
  static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@@ -142,13 -135,10 +135,13 @@@ static int tcf_simp_init(struct net *ne
  		ret = ACT_P_CREATED;
  	} else {
  		d = to_defact(pc);
 -		if (!ovr) {
 -			tcf_simp_release(d, bind);
 +
 +		if (bind)
 +			return 0;
 +		tcf_simp_release(d, bind);
 +		if (!ovr)
  			return -EEXIST;
 -		}
 +
  		reset_policy(d, defdata, parm);
  	}
  
@@@ -212,14 -202,23 +205,23 @@@ MODULE_LICENSE("GPL")
  
  static int __init simp_init_module(void)
  {
- 	int ret = tcf_register_action(&act_simp_ops);
+ 	int err, ret;
+ 	err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
+ 	if (err)
+ 		return err;
+ 
+ 	ret = tcf_register_action(&act_simp_ops);
  	if (!ret)
  		pr_info("Simple TC action Loaded\n");
+ 	else
+ 		tcf_hashinfo_destroy(&simp_hash_info);
+ 
  	return ret;
  }
  
  static void __exit simp_cleanup_module(void)
  {
+ 	tcf_hashinfo_destroy(&simp_hash_info);
  	tcf_unregister_action(&act_simp_ops);
  }
  
diff --combined net/sched/act_skbedit.c
index 8fe9d25,aa0a4c0..0fa1aad
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@@ -11,8 -11,7 +11,7 @@@
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
-  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-  * Place - Suite 330, Boston, MA 02111-1307 USA.
+  * this program; if not, see <http://www.gnu.org/licenses/>.
   *
   * Author: Alexander Duyck <alexander.h.duyck at intel.com>
   */
@@@ -29,15 -28,8 +28,8 @@@
  #include <net/tc_act/tc_skbedit.h>
  
  #define SKBEDIT_TAB_MASK     15
- static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
  static u32 skbedit_idx_gen;
- static DEFINE_RWLOCK(skbedit_lock);
- 
- static struct tcf_hashinfo skbedit_hash_info = {
- 	.htab	=	tcf_skbedit_ht,
- 	.hmask	=	SKBEDIT_TAB_MASK,
- 	.lock	=	&skbedit_lock,
- };
+ static struct tcf_hashinfo skbedit_hash_info;
  
  static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
  		       struct tcf_result *res)
@@@ -120,11 -112,10 +112,11 @@@ static int tcf_skbedit_init(struct net 
  		ret = ACT_P_CREATED;
  	} else {
  		d = to_skbedit(pc);
 -		if (!ovr) {
 -			tcf_hash_release(pc, bind, &skbedit_hash_info);
 +		if (bind)
 +			return 0;
 +		tcf_hash_release(pc, bind, &skbedit_hash_info);
 +		if (!ovr)
  			return -EEXIST;
 -		}
  	}
  
  	spin_lock_bh(&d->tcf_lock);
@@@ -212,11 -203,15 +204,15 @@@ MODULE_LICENSE("GPL")
  
  static int __init skbedit_init_module(void)
  {
+ 	int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
+ 	if (err)
+ 		return err;
  	return tcf_register_action(&act_skbedit_ops);
  }
  
  static void __exit skbedit_cleanup_module(void)
  {
+ 	tcf_hashinfo_destroy(&skbedit_hash_info);
  	tcf_unregister_action(&act_skbedit_ops);
  }
  
diff --combined net/sctp/outqueue.c
index 59268f6,111516c..9c77947
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@@ -22,9 -22,8 +22,8 @@@
   * See the GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
-  * along with GNU CC; see the file COPYING.  If not, write to
-  * the Free Software Foundation, 59 Temple Place - Suite 330,
-  * Boston, MA 02111-1307, USA.
+  * along with GNU CC; see the file COPYING.  If not, see
+  * <http://www.gnu.org/licenses/>.
   *
   * Please send any bug reports or fixes you make to the
   * email address(es):
@@@ -111,7 -110,7 +110,7 @@@ static inline int sctp_cacc_skip_3_1_d(
  				       struct sctp_transport *transport,
  				       int count_of_newacks)
  {
- 	if (count_of_newacks >=2 && transport != primary)
+ 	if (count_of_newacks >= 2 && transport != primary)
  		return 1;
  	return 0;
  }
@@@ -208,6 -207,8 +207,6 @@@ void sctp_outq_init(struct sctp_associa
  	INIT_LIST_HEAD(&q->retransmit);
  	INIT_LIST_HEAD(&q->sacked);
  	INIT_LIST_HEAD(&q->abandoned);
 -
 -	q->empty = 1;
  }
  
  /* Free the outqueue structure and any related pending chunks.
@@@ -330,6 -331,7 +329,6 @@@ int sctp_outq_tail(struct sctp_outq *q
  				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
  			else
  				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
 -			q->empty = 0;
  			break;
  		}
  	} else {
@@@ -468,7 -470,7 +467,7 @@@ void sctp_retransmit(struct sctp_outq *
  	struct net *net = sock_net(q->asoc->base.sk);
  	int error = 0;
  
- 	switch(reason) {
+ 	switch (reason) {
  	case SCTP_RTXR_T3_RTX:
  		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
  		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
@@@ -651,6 -653,7 +650,6 @@@ redo
  			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
  				chunk->fast_retransmit = SCTP_DONT_FRTX;
  
 -			q->empty = 0;
  			q->asoc->stats.rtxchunks++;
  			break;
  		}
@@@ -1061,6 -1064,8 +1060,6 @@@ static int sctp_outq_flush(struct sctp_
  
  			sctp_transport_reset_timers(transport);
  
 -			q->empty = 0;
 -
  			/* Only let one DATA chunk get bundled with a
  			 * COOKIE-ECHO chunk.
  			 */
@@@ -1083,7 -1088,7 +1082,7 @@@ sctp_flush_out
  	 *
  	 * --xguo
  	 */
- 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
+ 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
  		struct sctp_transport *t = list_entry(ltransport,
  						      struct sctp_transport,
  						      send_ready);
@@@ -1212,7 -1217,7 +1211,7 @@@ int sctp_outq_sack(struct sctp_outq *q
  		 * destinations for which cacc_saw_newack is set.
  		 */
  		if (transport->cacc.cacc_saw_newack)
- 			count_of_newacks ++;
+ 			count_of_newacks++;
  	}
  
  	/* Move the Cumulative TSN Ack Point if appropriate.  */
@@@ -1269,17 -1274,29 +1268,17 @@@
  		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
  		 asoc->adv_peer_ack_point);
  
 -	/* See if all chunks are acked.
 -	 * Make sure the empty queue handler will get run later.
 -	 */
 -	q->empty = (list_empty(&q->out_chunk_list) &&
 -		    list_empty(&q->retransmit));
 -	if (!q->empty)
 -		goto finish;
 -
 -	list_for_each_entry(transport, transport_list, transports) {
 -		q->empty = q->empty && list_empty(&transport->transmitted);
 -		if (!q->empty)
 -			goto finish;
 -	}
 -
 -	pr_debug("%s: sack queue is empty\n", __func__);
 -finish:
 -	return q->empty;
 +	return sctp_outq_is_empty(q);
  }
  
 -/* Is the outqueue empty?  */
 +/* Is the outqueue empty?
 + * The queue is empty when we have not pending data, no in-flight data
 + * and nothing pending retransmissions.
 + */
  int sctp_outq_is_empty(const struct sctp_outq *q)
  {
 -	return q->empty;
 +	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
 +	       list_empty(&q->retransmit);
  }
  
  /********************************************************************
diff --combined net/tipc/port.c
index d43f318,5fd4c8c..b742b26
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@@ -251,15 -251,18 +251,15 @@@ struct tipc_port *tipc_createport(struc
  	return p_ptr;
  }
  
 -int tipc_deleteport(u32 ref)
 +int tipc_deleteport(struct tipc_port *p_ptr)
  {
 -	struct tipc_port *p_ptr;
  	struct sk_buff *buf = NULL;
  
 -	tipc_withdraw(ref, 0, NULL);
 -	p_ptr = tipc_port_lock(ref);
 -	if (!p_ptr)
 -		return -EINVAL;
 +	tipc_withdraw(p_ptr, 0, NULL);
  
 -	tipc_ref_discard(ref);
 -	tipc_port_unlock(p_ptr);
 +	spin_lock_bh(p_ptr->lock);
 +	tipc_ref_discard(p_ptr->ref);
 +	spin_unlock_bh(p_ptr->lock);
  
  	k_cancel_timer(&p_ptr->timer);
  	if (p_ptr->connected) {
@@@ -701,36 -704,47 +701,36 @@@ int tipc_set_portimportance(u32 ref, un
  }
  
  
 -int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 +int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
 +		 struct tipc_name_seq const *seq)
  {
 -	struct tipc_port *p_ptr;
  	struct publication *publ;
  	u32 key;
 -	int res = -EINVAL;
  
 -	p_ptr = tipc_port_lock(ref);
 -	if (!p_ptr)
 +	if (p_ptr->connected)
  		return -EINVAL;
 +	key = p_ptr->ref + p_ptr->pub_count + 1;
 +	if (key == p_ptr->ref)
 +		return -EADDRINUSE;
  
 -	if (p_ptr->connected)
 -		goto exit;
 -	key = ref + p_ptr->pub_count + 1;
 -	if (key == ref) {
 -		res = -EADDRINUSE;
 -		goto exit;
 -	}
  	publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
  				    scope, p_ptr->ref, key);
  	if (publ) {
  		list_add(&publ->pport_list, &p_ptr->publications);
  		p_ptr->pub_count++;
  		p_ptr->published = 1;
 -		res = 0;
 +		return 0;
  	}
 -exit:
 -	tipc_port_unlock(p_ptr);
 -	return res;
 +	return -EINVAL;
  }
  
 -int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 +int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
 +		  struct tipc_name_seq const *seq)
  {
 -	struct tipc_port *p_ptr;
  	struct publication *publ;
  	struct publication *tpubl;
  	int res = -EINVAL;
  
 -	p_ptr = tipc_port_lock(ref);
 -	if (!p_ptr)
 -		return -EINVAL;
  	if (!seq) {
  		list_for_each_entry_safe(publ, tpubl,
  					 &p_ptr->publications, pport_list) {
@@@ -757,6 -771,7 +757,6 @@@
  	}
  	if (list_empty(&p_ptr->publications))
  		p_ptr->published = 0;
 -	tipc_port_unlock(p_ptr);
  	return res;
  }
  
@@@ -817,17 -832,14 +817,14 @@@ exit
   */
  int __tipc_disconnect(struct tipc_port *tp_ptr)
  {
- 	int res;
- 
  	if (tp_ptr->connected) {
  		tp_ptr->connected = 0;
  		/* let timer expire on it's own to avoid deadlock! */
  		tipc_nodesub_unsubscribe(&tp_ptr->subscription);
- 		res = 0;
- 	} else {
- 		res = -ENOTCONN;
+ 		return 0;
  	}
- 	return res;
+ 
+ 	return -ENOTCONN;
  }
  
  /*
diff --combined net/tipc/socket.c
index e741416,5efdeef..c8341d1
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@@ -239,7 -239,6 +239,6 @@@ static int tipc_sk_create(struct net *n
  int tipc_sock_create_local(int type, struct socket **res)
  {
  	int rc;
- 	struct sock *sk;
  
  	rc = sock_create_lite(AF_TIPC, type, 0, res);
  	if (rc < 0) {
@@@ -248,8 -247,6 +247,6 @@@
  	}
  	tipc_sk_create(&init_net, *res, 0, 1);
  
- 	sk = (*res)->sk;
- 
  	return 0;
  }
  
@@@ -354,7 -351,7 +351,7 @@@ static int release(struct socket *sock
  	 * Delete TIPC port; this ensures no more messages are queued
  	 * (also disconnects an active connection & sends a 'FIN-' to peer)
  	 */
 -	res = tipc_deleteport(tport->ref);
 +	res = tipc_deleteport(tport);
  
  	/* Discard any remaining (connection-based) messages in receive queue */
  	__skb_queue_purge(&sk->sk_receive_queue);
@@@ -386,46 -383,30 +383,46 @@@
   */
  static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
  {
 +	struct sock *sk = sock->sk;
  	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 -	u32 portref = tipc_sk_port(sock->sk)->ref;
 +	struct tipc_port *tport = tipc_sk_port(sock->sk);
 +	int res = -EINVAL;
  
 -	if (unlikely(!uaddr_len))
 -		return tipc_withdraw(portref, 0, NULL);
 +	lock_sock(sk);
 +	if (unlikely(!uaddr_len)) {
 +		res = tipc_withdraw(tport, 0, NULL);
 +		goto exit;
 +	}
  
 -	if (uaddr_len < sizeof(struct sockaddr_tipc))
 -		return -EINVAL;
 -	if (addr->family != AF_TIPC)
 -		return -EAFNOSUPPORT;
 +	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
 +		res = -EINVAL;
 +		goto exit;
 +	}
 +	if (addr->family != AF_TIPC) {
 +		res = -EAFNOSUPPORT;
 +		goto exit;
 +	}
  
  	if (addr->addrtype == TIPC_ADDR_NAME)
  		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
 -	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
 -		return -EAFNOSUPPORT;
 +	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
 +		res = -EAFNOSUPPORT;
 +		goto exit;
 +	}
  
  	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
  	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
 -	    (addr->addr.nameseq.type != TIPC_CFG_SRV))
 -		return -EACCES;
 +	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
 +		res = -EACCES;
 +		goto exit;
 +	}
  
 -	return (addr->scope > 0) ?
 -		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
 -		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
 +	res = (addr->scope > 0) ?
 +		tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
 +		tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
 +exit:
 +	release_sock(sk);
 +	return res;
  }
  
  /**
@@@ -770,16 -751,11 +767,11 @@@ static int send_stream(struct kiocb *io
  
  	/* Handle special cases where there is no connection */
  	if (unlikely(sock->state != SS_CONNECTED)) {
- 		if (sock->state == SS_UNCONNECTED) {
+ 		if (sock->state == SS_UNCONNECTED)
  			res = send_packet(NULL, sock, m, total_len);
- 			goto exit;
- 		} else if (sock->state == SS_DISCONNECTING) {
- 			res = -EPIPE;
- 			goto exit;
- 		} else {
- 			res = -ENOTCONN;
- 			goto exit;
- 		}
+ 		else
+ 			res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
+ 		goto exit;
  	}
  
  	if (unlikely(m->msg_name)) {
@@@ -1327,14 -1303,12 +1319,12 @@@ static u32 filter_connect(struct tipc_s
  static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
  {
  	struct tipc_msg *msg = buf_msg(buf);
- 	unsigned int limit;
  
  	if (msg_connected(msg))
- 		limit = sysctl_tipc_rmem[2];
- 	else
- 		limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
- 			msg_importance(msg);
- 	return limit;
+ 		return sysctl_tipc_rmem[2];
+ 
+ 	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
+ 		msg_importance(msg);
  }
  
  /**
@@@ -1530,14 -1504,12 +1520,12 @@@ static int connect(struct socket *sock
  				sock->state != SS_CONNECTING,
  				timeout ? (long)msecs_to_jiffies(timeout)
  					: MAX_SCHEDULE_TIMEOUT);
  		if (res <= 0) {
  			if (res == 0)
  				res = -ETIMEDOUT;
- 			else
- 				; /* leave "res" unchanged */
- 			goto exit;
+ 			return res;
  		}
+ 		lock_sock(sk);
  	}
  
  	if (unlikely(sock->state == SS_DISCONNECTING))

-- 
LinuxNextTracking


More information about the linux-merge mailing list