[linux-next] LinuxNextTracking branch, master, updated. next-20111011

batman at open-mesh.org batman at open-mesh.org
Tue Oct 11 16:06:35 CEST 2011


The following commit has been merged in the master branch:
commit 88c5100c28b02c4b2b2c6f6fafbbd76d90f698b9
Merge: 8083f0fc969d9b5353061a7a6f963405057e26b1 3ee72ca99288f1de95ec9c570e43f531c8799f06
Author: David S. Miller <davem at davemloft.net>
Date:   Fri Oct 7 13:38:43 2011 -0400

    Merge branch 'master' of github.com:davem330/net
    
    Conflicts:
    	net/batman-adv/soft-interface.c

diff --combined Documentation/networking/ip-sysctl.txt
index 98c8d42,ca5cdcd..cb7f314
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@@ -1042,14 -1042,9 +1042,14 @@@ conf/interface/*
  	The functional behaviour for certain settings is different
  	depending on whether local forwarding is enabled or not.
  
- accept_ra - BOOLEAN
+ accept_ra - INTEGER
  	Accept Router Advertisements; autoconfigure using them.
  
 +	It also determines whether or not to transmit Router
 +	Solicitations. If and only if the functional setting is to
 +	accept Router Advertisements, Router Solicitations will be
 +	transmitted.
 +
  	Possible values are:
  		0 Do not accept Router Advertisements.
  		1 Accept Router Advertisements if forwarding is disabled.
@@@ -1111,7 -1106,7 +1111,7 @@@ dad_transmits - INTEGE
  	The amount of Duplicate Address Detection probes to send.
  	Default: 1
  
- forwarding - BOOLEAN
+ forwarding - INTEGER
  	Configure interface-specific Host/Router behaviour.
  
  	Note: It is recommended to have the same setting on all
@@@ -1120,14 -1115,14 +1120,14 @@@
  	Possible values are:
  		0 Forwarding disabled
  		1 Forwarding enabled
 -		2 Forwarding enabled (Hybrid Mode)
  
  	FALSE (0):
  
  	By default, Host behaviour is assumed.  This means:
  
  	1. IsRouter flag is not set in Neighbour Advertisements.
 -	2. Router Solicitations are being sent when necessary.
 +	2. If accept_ra is TRUE (default), transmit Router
 +	   Solicitations.
  	3. If accept_ra is TRUE (default), accept Router
  	   Advertisements (and do autoconfiguration).
  	4. If accept_redirects is TRUE (default), accept Redirects.
@@@ -1138,10 -1133,16 +1138,10 @@@
  	This means exactly the reverse from the above:
  
  	1. IsRouter flag is set in Neighbour Advertisements.
 -	2. Router Solicitations are not sent.
 +	2. Router Solicitations are not sent unless accept_ra is 2.
  	3. Router Advertisements are ignored unless accept_ra is 2.
  	4. Redirects are ignored.
  
 -	TRUE (2):
 -
 -	Hybrid mode. Same behaviour as TRUE, except for:
 -
 -	2. Router Solicitations are being sent when necessary.
 -
  	Default: 0 (disabled) if global forwarding is disabled (default),
  		 otherwise 1 (enabled).
  
diff --combined MAINTAINERS
index 65ca7ea,ace8f9c..aac56f9
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -117,20 -117,20 +117,20 @@@ Maintainers List (try to look for most 
  M:	Philip Blundell <philb at gnu.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/3c505*
 +F:	drivers/net/ethernet/i825xx/3c505*
  
  3C59X NETWORK DRIVER
  M:	Steffen Klassert <klassert at mathematik.tu-chemnitz.de>
  L:	netdev at vger.kernel.org
  S:	Maintained
  F:	Documentation/networking/vortex.txt
 -F:	drivers/net/3c59x.c
 +F:	drivers/net/ethernet/3com/3c59x.c
  
  3CR990 NETWORK DRIVER
  M:	David Dillow <dave at thedillows.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/typhoon*
 +F:	drivers/net/ethernet/3com/typhoon*
  
  3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
  M:	Adam Radford <linuxraid at lsi.com>
@@@ -156,7 -156,7 +156,7 @@@ M:	Realtek linux nic maintainers <nic_s
  M:	Francois Romieu <romieu at fr.zoreil.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/r8169.c
 +F:	drivers/net/ethernet/realtek/r8169.c
  
  8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
  M:	Greg Kroah-Hartman <gregkh at suse.de>
@@@ -170,7 -170,8 +170,7 @@@ F:	include/linux/serial_8250.
  8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
  L:	netdev at vger.kernel.org
  S:	Orphan / Obsolete
 -F:	drivers/net/*8390*
 -F:	drivers/net/ax88796.c
 +F:	drivers/net/ethernet/8390/
  
  9P FILE SYSTEM
  M:	Eric Van Hensbergen <ericvh at gmail.com>
@@@ -213,7 -214,7 +213,7 @@@ ACENIC DRIVE
  M:	Jes Sorensen <jes at trained-monkey.org>
  L:	linux-acenic at sunsite.dk
  S:	Maintained
 -F:	drivers/net/acenic*
 +F:	drivers/net/ethernet/alteon/acenic*
  
  ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
  M:	Peter Feuerer <peter at piie.net>
@@@ -745,7 -746,7 +745,7 @@@ L:	linux-arm-kernel at lists.infradead.or
  W:	http://www.arm.linux.org.uk/
  S:	Maintained
  F:	arch/arm/mach-ebsa110/
 -F:	drivers/net/arm/am79c961a.*
 +F:	drivers/net/ethernet/amd/am79c961a.*
  
  ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
  M:	Daniel Ribeiro <drwyrm at gmail.com>
@@@ -1014,8 -1015,7 +1014,8 @@@ F:	arch/arm/include/asm/hardware/ioc.
  F:	arch/arm/include/asm/hardware/iomd.h
  F:	arch/arm/include/asm/hardware/memc.h
  F:	arch/arm/mach-rpc/
 -F:	drivers/net/arm/ether*
 +F:	drivers/net/ethernet/i825xx/ether1*
 +F:	drivers/net/ethernet/seeq/ether3*
  F:	drivers/scsi/arm/
  
  ARM/SHARK MACHINE SUPPORT
@@@ -1127,7 -1127,7 +1127,7 @@@ F:	arch/arm/mach-nuc93x
  F:	drivers/input/keyboard/w90p910_keypad.c
  F:	drivers/input/touchscreen/w90p910_ts.c
  F:	drivers/watchdog/nuc900_wdt.c
 -F:	drivers/net/arm/w90p910_ether.c
 +F:	drivers/net/ethernet/nuvoton/w90p910_ether.c
  F:	drivers/mtd/nand/nuc900_nand.c
  F:	drivers/rtc/rtc-nuc900.c
  F:	drivers/spi/spi_nuc900.c
@@@ -1230,7 -1230,7 +1230,7 @@@ F:	Documentation/aoe
  F:	drivers/block/aoe/
  
  ATHEROS ATH GENERIC UTILITIES
 -M:	"Luis R. Rodriguez" <lrodriguez at atheros.com>
 +M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
  L:	linux-wireless at vger.kernel.org
  S:	Supported
  F:	drivers/net/wireless/ath/*
@@@ -1238,7 -1238,7 +1238,7 @@@
  ATHEROS ATH5K WIRELESS DRIVER
  M:	Jiri Slaby <jirislaby at gmail.com>
  M:	Nick Kossifidis <mickflemm at gmail.com>
 -M:	"Luis R. Rodriguez" <lrodriguez at atheros.com>
 +M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
  M:	Bob Copeland <me at bobcopeland.com>
  L:	linux-wireless at vger.kernel.org
  L:	ath5k-devel at lists.ath5k.org
@@@ -1246,19 -1246,11 +1246,19 @@@ W:	http://wireless.kernel.org/en/users/
  S:	Maintained
  F:	drivers/net/wireless/ath/ath5k/
  
 +ATHEROS ATH6KL WIRELESS DRIVER
 +M:	Kalle Valo <kvalo at qca.qualcomm.com>
 +L:	linux-wireless at vger.kernel.org
 +W:	http://wireless.kernel.org/en/users/Drivers/ath6kl
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
 +S:	Supported
 +F:	drivers/net/wireless/ath/ath6kl/
 +
  ATHEROS ATH9K WIRELESS DRIVER
 -M:	"Luis R. Rodriguez" <lrodriguez at atheros.com>
 -M:	Jouni Malinen <jmalinen at atheros.com>
 -M:	Vasanthakumar Thiagarajan <vasanth at atheros.com>
 -M:	Senthil Balasubramanian <senthilkumar at atheros.com>
 +M:	"Luis R. Rodriguez" <mcgrof at qca.qualcomm.com>
 +M:	Jouni Malinen <jouni at qca.qualcomm.com>
 +M:	Vasanthakumar Thiagarajan <vthiagar at qca.qualcomm.com>
 +M:	Senthil Balasubramanian <senthilb at qca.qualcomm.com>
  L:	linux-wireless at vger.kernel.org
  L:	ath9k-devel at lists.ath9k.org
  W:	http://wireless.kernel.org/en/users/Drivers/ath9k
@@@ -1290,7 -1282,7 +1290,7 @@@ L:	netdev at vger.kernel.or
  W:	http://sourceforge.net/projects/atl1
  W:	http://atl1.sourceforge.net
  S:	Maintained
 -F:	drivers/net/atlx/
 +F:	drivers/net/ethernet/atheros/
  
  ATM
  M:	Chas Williams <chas at cmf.nrl.navy.mil>
@@@ -1330,7 -1322,7 +1330,7 @@@ F:	include/video/atmel_lcdc.
  ATMEL MACB ETHERNET DRIVER
  M:	Nicolas Ferre <nicolas.ferre at atmel.com>
  S:	Supported
 -F:	drivers/net/macb.*
 +F:	drivers/net/ethernet/cadence/
  
  ATMEL SPI DRIVER
  M:	Nicolas Ferre <nicolas.ferre at atmel.com>
@@@ -1453,7 -1445,7 +1453,7 @@@ BLACKFIN EMAC DRIVE
  L:	uclinux-dist-devel at blackfin.uclinux.org
  W:	http://blackfin.uclinux.org
  S:	Supported
 -F:	drivers/net/bfin_mac.*
 +F:	drivers/net/ethernet/adi/
  
  BLACKFIN RTC DRIVER
  M:	Mike Frysinger <vapier.adi at gmail.com>
@@@ -1534,27 -1526,27 +1534,27 @@@ BROADCOM B44 10/100 ETHERNET DRIVE
  M:	Gary Zambrano <zambrano at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/b44.*
 +F:	drivers/net/ethernet/broadcom/b44.*
  
  BROADCOM BNX2 GIGABIT ETHERNET DRIVER
  M:	Michael Chan <mchan at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/bnx2.*
 -F:	drivers/net/bnx2_*
 +F:	drivers/net/ethernet/broadcom/bnx2.*
 +F:	drivers/net/ethernet/broadcom/bnx2_*
  
  BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
  M:	Eilon Greenstein <eilong at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/bnx2x/
 +F:	drivers/net/ethernet/broadcom/bnx2x/
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
  M:	Matt Carlson <mcarlson at broadcom.com>
  M:	Michael Chan <mchan at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/tg3.*
 +F:	drivers/net/ethernet/broadcom/tg3.*
  
  BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
  M:	Brett Rudley <brudley at broadcom.com>
@@@ -1583,7 -1575,7 +1583,7 @@@ BROCADE BNA 10 GIGABIT ETHERNET DRIVE
  M:	Rasesh Mody <rmody at brocade.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/bna/
 +F:	drivers/net/ethernet/brocade/bna/
  
  BSG (block layer generic sg v4 driver)
  M:	FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
@@@ -1767,13 -1759,13 +1767,13 @@@ M:	Christian Benvenuti <benve at cisco.com
  M:	Roopa Prabhu <roprabhu at cisco.com>
  M:	David Wang <dwang2 at cisco.com>
  S:	Supported
 -F:	drivers/net/enic/
 +F:	drivers/net/ethernet/cisco/enic/
  
  CIRRUS LOGIC EP93XX ETHERNET DRIVER
  M:	Hartley Sweeten <hsweeten at visionengravers.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/arm/ep93xx_eth.c
 +F:	drivers/net/ethernet/cirrus/ep93xx_eth.c
  
  CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
  M:	Lennert Buytenhek <kernel at wantstofly.org>
@@@ -1913,7 -1905,7 +1913,7 @@@ CPMAC ETHERNET DRIVE
  M:	Florian Fainelli <florian at openwrt.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/cpmac.c
 +F:	drivers/net/ethernet/ti/cpmac.c
  
  CPU FREQUENCY DRIVERS
  M:	Dave Jones <davej at redhat.com>
@@@ -2000,7 -1992,7 +2000,7 @@@ M:	Divy Le Ray <divy at chelsio.com
  L:	netdev at vger.kernel.org
  W:	http://www.chelsio.com
  S:	Supported
 -F:	drivers/net/cxgb3/
 +F:	drivers/net/ethernet/chelsio/cxgb3/
  
  CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
  M:	Steve Wise <swise at chelsio.com>
@@@ -2014,7 -2006,7 +2014,7 @@@ M:	Dimitris Michailidis <dm at chelsio.com
  L:	netdev at vger.kernel.org
  W:	http://www.chelsio.com
  S:	Supported
 -F:	drivers/net/cxgb4/
 +F:	drivers/net/ethernet/chelsio/cxgb4/
  
  CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
  M:	Steve Wise <swise at chelsio.com>
@@@ -2028,14 -2020,14 +2028,14 @@@ M:	Casey Leedom <leedom at chelsio.com
  L:	netdev at vger.kernel.org
  W:	http://www.chelsio.com
  S:	Supported
 -F:	drivers/net/cxgb4vf/
 +F:	drivers/net/ethernet/chelsio/cxgb4vf/
  
  STMMAC ETHERNET DRIVER
  M:	Giuseppe Cavallaro <peppe.cavallaro at st.com>
  L:	netdev at vger.kernel.org
  W:	http://www.stlinux.com
  S:	Supported
 -F:	drivers/net/stmmac/
 +F:	drivers/net/ethernet/stmicro/stmmac/
  
  CYBERPRO FB DRIVER
  M:	Russell King <linux at arm.linux.org.uk>
@@@ -2079,7 -2071,7 +2079,7 @@@ DAVICOM FAST ETHERNET (DMFE) NETWORK DR
  L:	netdev at vger.kernel.org
  S:	Orphan
  F:	Documentation/networking/dmfe.txt
 -F:	drivers/net/tulip/dmfe.c
 +F:	drivers/net/ethernet/tulip/dmfe.c
  
  DC390/AM53C974 SCSI driver
  M:	Kurt Garloff <garloff at suse.de>
@@@ -2118,7 -2110,7 +2118,7 @@@ F:	net/decnet
  DEFXX FDDI NETWORK DRIVER
  M:	"Maciej W. Rozycki" <macro at linux-mips.org>
  S:	Maintained
 -F:	drivers/net/defxx.*
 +F:	drivers/net/fddi/defxx.*
  
  DELL LAPTOP DRIVER
  M:	Matthew Garrett <mjg59 at srcf.ucam.org>
@@@ -2471,7 -2463,7 +2471,7 @@@ EHEA (IBM pSeries eHEA 10Gb ethernet ad
  M:	Breno Leitao <leitao at linux.vnet.ibm.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/ehea/
 +F:	drivers/net/ethernet/ibm/ehea/
  
  EMBEDDED LINUX
  M:	Paul Gortmaker <paul.gortmaker at windriver.com>
@@@ -2516,7 -2508,7 +2516,7 @@@ ETHEREXPRESS-16 NETWORK DRIVE
  M:	Philip Blundell <philb at gnu.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/eexpress.*
 +F:	drivers/net/ethernet/i825xx/eexpress.*
  
  ETHERNET BRIDGE
  M:	Stephen Hemminger <shemminger at linux-foundation.org>
@@@ -2530,7 -2522,7 +2530,7 @@@ F:	net/bridge
  ETHERTEAM 16I DRIVER
  M:	Mika Kuoppala <miku at iki.fi>
  S:	Maintained
 -F:	drivers/net/eth16i.c
 +F:	drivers/net/ethernet/fujitsu/eth16i.c
  
  EXT2 FILE SYSTEM
  M:	Jan Kara <jack at suse.cz>
@@@ -2694,7 -2686,7 +2694,7 @@@ M:	Vitaly Bordug <vbordug at ru.mvista.com
  L:	linuxppc-dev at lists.ozlabs.org
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/fs_enet/
 +F:	drivers/net/ethernet/freescale/fs_enet/
  F:	include/linux/fs_enet_pd.h
  
  FREESCALE QUICC ENGINE LIBRARY
@@@ -2716,7 -2708,7 +2716,7 @@@ M:	Li Yang <leoli at freescale.com
  L:	netdev at vger.kernel.org
  L:	linuxppc-dev at lists.ozlabs.org
  S:	Maintained
 -F:	drivers/net/ucc_geth*
 +F:	drivers/net/ethernet/freescale/ucc_geth*
  
  FREESCALE QUICC ENGINE UCC UART DRIVER
  M:	Timur Tabi <timur at freescale.com>
@@@ -3054,7 -3046,6 +3054,7 @@@ S:	Maintaine
  F:	include/linux/hippidevice.h
  F:	include/linux/if_hippi.h
  F:	net/802/hippi.c
 +F:	drivers/net/hippi/
  
  HOST AP DRIVER
  M:	Jouni Malinen <j at w1.fi>
@@@ -3072,7 -3063,7 +3072,7 @@@ F:	drivers/platform/x86/tc1100-wmi.
  HP100:	Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
  M:	Jaroslav Kysela <perex at perex.cz>
  S:	Maintained
 -F:	drivers/net/hp100.*
 +F:	drivers/net/ethernet/hp/hp100.*
  
  HPET:	High Precision Event Timers driver
  M:	Clemens Ladisch <clemens at ladisch.de>
@@@ -3170,7 -3161,7 +3170,7 @@@ IBM Power Virtual Ethernet Device Drive
  M:	Santiago Leon <santil at linux.vnet.ibm.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/ibmveth.*
 +F:	drivers/net/ethernet/ibm/ibmveth.*
  
  IBM ServeRAID RAID DRIVER
  P:	Jack Hammer
@@@ -3337,7 -3328,7 +3337,7 @@@ F:	arch/arm/mach-ixp4xx/include/mach/qm
  F:	arch/arm/mach-ixp4xx/include/mach/npe.h
  F:	arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
  F:	arch/arm/mach-ixp4xx/ixp4xx_npe.c
 -F:	drivers/net/arm/ixp4xx_eth.c
 +F:	drivers/net/ethernet/xscale/ixp4xx_eth.c
  F:	drivers/net/wan/ixp4xx_hss.c
  
  INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
@@@ -3349,7 -3340,7 +3349,7 @@@ INTEL IXP2000 ETHERNET DRIVE
  M:	Lennert Buytenhek <kernel at wantstofly.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/ixp2000/
 +F:	drivers/net/ethernet/xscale/ixp2000/
  
  INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
  M:	Jeff Kirsher <jeffrey.t.kirsher at intel.com>
@@@ -3358,13 -3349,13 +3358,13 @@@ M:	Bruce Allan <bruce.w.allan at intel.com
  M:	Carolyn Wyborny <carolyn.wyborny at intel.com>
  M:	Don Skidmore <donald.c.skidmore at intel.com>
  M:	Greg Rose <gregory.v.rose at intel.com>
 -M:	PJ Waskiewicz <peter.p.waskiewicz.jr at intel.com>
 +M:	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr at intel.com>
  M:	Alex Duyck <alexander.h.duyck at intel.com>
  M:	John Ronciak <john.ronciak at intel.com>
  L:	e1000-devel at lists.sourceforge.net
  W:	http://e1000.sourceforge.net/
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
  S:	Supported
  F:	Documentation/networking/e100.txt
  F:	Documentation/networking/e1000.txt
@@@ -3374,7 -3365,14 +3374,7 @@@ F:	Documentation/networking/igbvf.tx
  F:	Documentation/networking/ixgb.txt
  F:	Documentation/networking/ixgbe.txt
  F:	Documentation/networking/ixgbevf.txt
 -F:	drivers/net/e100.c
 -F:	drivers/net/e1000/
 -F:	drivers/net/e1000e/
 -F:	drivers/net/igb/
 -F:	drivers/net/igbvf/
 -F:	drivers/net/ixgb/
 -F:	drivers/net/ixgbe/
 -F:	drivers/net/ixgbevf/
 +F:	drivers/net/ethernet/intel/
  
  INTEL MRST PMU DRIVER
  M:	Len Brown <len.brown at intel.com>
@@@ -3426,7 -3424,7 +3426,7 @@@ M:	Wey-Yi Guy <wey-yi.w.guy at intel.com
  M:	Intel Linux Wireless <ilw at linux.intel.com>
  L:	linux-wireless at vger.kernel.org
  W:	http://intellinuxwireless.org
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
  S:	Supported
  F:	drivers/net/wireless/iwlwifi/
  
@@@ -3442,7 -3440,7 +3442,7 @@@ IOC3 ETHERNET DRIVE
  M:	Ralf Baechle <ralf at linux-mips.org>
  L:	linux-mips at linux-mips.org
  S:	Maintained
 -F:	drivers/net/ioc3-eth.c
 +F:	drivers/net/ethernet/sgi/ioc3-eth.c
  
  IOC3 SERIAL DRIVER
  M:	Pat Gefre <pfg at sgi.com>
@@@ -3460,7 -3458,7 +3460,7 @@@ M:	Francois Romieu <romieu at fr.zoreil.co
  M:	Sorbica Shieh <sorbica at icplus.com.tw>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/ipg.*
 +F:	drivers/net/ethernet/icplus/ipg.*
  
  IPATH DRIVER
  M:	Mike Marciniszyn <infinipath at qlogic.com>
@@@ -3608,7 -3606,7 +3608,7 @@@ JME NETWORK DRIVE
  M:	Guo-Fu Tseng <cooldavid at cooldavid.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/jme.*
 +F:	drivers/net/ethernet/jme.*
  
  JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
  M:	David Woodhouse <dwmw2 at infradead.org>
@@@ -4139,7 -4137,7 +4139,7 @@@ MARVELL MV643XX ETHERNET DRIVE
  M:	Lennert Buytenhek <buytenh at wantstofly.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/mv643xx_eth.*
 +F:	drivers/net/ethernet/marvell/mv643xx_eth.*
  F:	include/linux/mv643xx.h
  
  MARVELL MWIFIEX WIRELESS DRIVER
@@@ -4353,12 -4351,12 +4353,12 @@@ M:	Andrew Gallatin <gallatin at myri.com
  L:	netdev at vger.kernel.org
  W:	http://www.myri.com/scs/download-Myri10GE.html
  S:	Supported
 -F:	drivers/net/myri10ge/
 +F:	drivers/net/ethernet/myricom/myri10ge/
  
  NATSEMI ETHERNET DRIVER (DP8381x)
  M:	Tim Hockin <thockin at hockin.org>
  S:	Maintained
 -F:	drivers/net/natsemi.c
 +F:	drivers/net/ethernet/natsemi/natsemi.c
  
  NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
  M:	Daniel Mack <zonque at gmail.com>
@@@ -4398,8 -4396,9 +4398,8 @@@ W:	http://trac.neterion.com/cgi-bin/tra
  W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
  S:	Supported
  F:	Documentation/networking/s2io.txt
 -F:	drivers/net/s2io*
  F:	Documentation/networking/vxge.txt
 -F:	drivers/net/vxge/
 +F:	drivers/net/ethernet/neterion/
  
  NETFILTER/IPTABLES/IPCHAINS
  P:	Rusty Russell
@@@ -4513,23 -4512,11 +4513,23 @@@ F:	include/linux/if_
  F:	include/linux/*device.h
  
  NETXEN (1/10) GbE SUPPORT
 -M:	Amit Kumar Salecha <amit.salecha at qlogic.com>
 +M:	Sony Chacko <sony.chacko at qlogic.com>
 +M:	Rajesh Borundia <rajesh.borundia at qlogic.com>
  L:	netdev at vger.kernel.org
  W:	http://www.qlogic.com
  S:	Supported
 -F:	drivers/net/netxen/
 +F:	drivers/net/ethernet/qlogic/netxen/
 +
 +NFC SUBSYSTEM
 +M:	Lauro Ramos Venancio <lauro.venancio at openbossa.org>
 +M:	Aloisio Almeida Jr <aloisio.almeida at openbossa.org>
 +M:	Samuel Ortiz <sameo at linux.intel.com>
 +L:	linux-wireless at vger.kernel.org
 +S:	Maintained
 +F:	net/nfc/
 +F:	include/linux/nfc.h
 +F:	include/net/nfc/
 +F:	drivers/nfc/
  
  NFS, SUNRPC, AND LOCKD CLIENTS
  M:	Trond Myklebust <Trond.Myklebust at netapp.com>
@@@ -4550,7 -4537,7 +4550,7 @@@ M:	Jan-Pascal van Best <janpascal at vanbe
  M:	Andreas Mohr <andi at lisas.de>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/ni5010.*
 +F:	drivers/net/ethernet/racal/ni5010.*
  
  NILFS2 FILESYSTEM
  M:	KONISHI Ryusuke <konishi.ryusuke at lab.ntt.co.jp>
@@@ -4816,7 -4803,7 +4816,7 @@@ PA SEMI ETHERNET DRIVE
  M:	Olof Johansson <olof at lixom.net>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/pasemi_mac.*
 +F:	drivers/net/ethernet/pasemi/*
  
  PA SEMI SMBUS DRIVER
  M:	Olof Johansson <olof at lixom.net>
@@@ -4963,7 -4950,7 +4963,7 @@@ PCNET32 NETWORK DRIVE
  M:	Don Fry <pcnet32 at frontier.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/pcnet32.c
 +F:	drivers/net/ethernet/amd/pcnet32.c
  
  PCRYPT PARALLEL CRYPTO ENGINE
  M:	Steffen Klassert <steffen.klassert at secunet.com>
@@@ -5095,7 -5082,7 +5095,7 @@@ PPP PROTOCOL DRIVERS AND COMPRESSOR
  M:	Paul Mackerras <paulus at samba.org>
  L:	linux-ppp at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/ppp_*
 +F:	drivers/net/ppp/ppp_*
  
  PPP OVER ATM (RFC 2364)
  M:	Mitchell Blank Jr <mitch at sfgoth.com>
@@@ -5106,8 -5093,8 +5106,8 @@@ F:	include/linux/atmppp.
  PPP OVER ETHERNET
  M:	Michal Ostrowski <mostrows at earthlink.net>
  S:	Maintained
 -F:	drivers/net/pppoe.c
 -F:	drivers/net/pppox.c
 +F:	drivers/net/ppp/pppoe.c
 +F:	drivers/net/ppp/pppox.c
  
  PPP OVER L2TP
  M:	James Chapman <jchapman at katalix.com>
@@@ -5128,7 -5115,7 +5128,7 @@@ PPTP DRIVE
  M:	Dmitry Kozlov <xeb at mail.ru>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/pptp.c
 +F:	drivers/net/ppp/pptp.c
  W:	http://sourceforge.net/projects/accel-pptp
  
  PREEMPTIBLE KERNEL
@@@ -5157,7 -5144,7 +5157,7 @@@ M:	Geoff Levand <geoff at infradead.org
  L:	netdev at vger.kernel.org
  L:	cbe-oss-dev at lists.ozlabs.org
  S:	Maintained
 -F:	drivers/net/ps3_gelic_net.*
 +F:	drivers/net/ethernet/toshiba/ps3_gelic_net.*
  
  PS3 PLATFORM SUPPORT
  M:	Geoff Levand <geoff at infradead.org>
@@@ -5275,24 -5262,23 +5275,24 @@@ M:	linux-driver at qlogic.co
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	Documentation/networking/LICENSE.qla3xxx
 -F:	drivers/net/qla3xxx.*
 +F:	drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
 -M:	Amit Kumar Salecha <amit.salecha at qlogic.com>
  M:	Anirban Chakraborty <anirban.chakraborty at qlogic.com>
 +M:	Sony Chacko <sony.chacko at qlogic.com>
  M:	linux-driver at qlogic.com
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/qlcnic/
 +F:	drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
 +M:	Anirban Chakraborty <anirban.chakraborty at qlogic.com>
  M:	Jitendra Kalsaria <jitendra.kalsaria at qlogic.com>
  M:	Ron Mercer <ron.mercer at qlogic.com>
  M:	linux-driver at qlogic.com
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/qlge/
 +F:	drivers/net/ethernet/qlogic/qlge/
  
  QNX4 FILESYSTEM
  M:	Anders Larsen <al at alarsen.net>
@@@ -5374,7 -5360,7 +5374,7 @@@ RDC R6040 FAST ETHERNET DRIVE
  M:	Florian Fainelli <florian at openwrt.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/r6040.c
 +F:	drivers/net/ethernet/rdc/r6040.c
  
  RDS - RELIABLE DATAGRAM SOCKETS
  M:	Andy Grover <andy.grover at oracle.com>
@@@ -5778,7 -5764,7 +5778,7 @@@ M:	Ajit Khaparde <ajit.khaparde at emulex.
  L:	netdev at vger.kernel.org
  W:	http://www.emulex.com
  S:	Supported
 -F:	drivers/net/benet/
 +F:	drivers/net/ethernet/emulex/benet/
  
  SFC NETWORK DRIVER
  M:	Solarflare linux maintainers <linux-net-drivers at solarflare.com>
@@@ -5786,7 -5772,7 +5786,7 @@@ M:	Steve Hodgson <shodgson at solarflare.c
  M:	Ben Hutchings <bhutchings at solarflare.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/sfc/
 +F:	drivers/net/ethernet/sfc/
  
  SGI GRU DRIVER
  M:	Jack Steiner <steiner at sgi.com>
@@@ -5852,14 -5838,14 +5852,14 @@@ SIS 190 ETHERNET DRIVE
  M:	Francois Romieu <romieu at fr.zoreil.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/sis190.c
 +F:	drivers/net/ethernet/sis/sis190.c
  
  SIS 900/7016 FAST ETHERNET DRIVER
  M:	Daniele Venzano <venza at brownhat.org>
  W:	http://www.brownhat.org/sis900.html
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/sis900.*
 +F:	drivers/net/ethernet/sis/sis900.*
  
  SIS 96X I2C/SMBUS DRIVER
  M:	"Mark M. Hoffman" <mhoffman at lightlink.com>
@@@ -5886,7 -5872,8 +5886,7 @@@ SKGE, SKY2 10/100/1000 GIGABIT ETHERNE
  M:	Stephen Hemminger <shemminger at linux-foundation.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/skge.*
 -F:	drivers/net/sky2.*
 +F:	drivers/net/ethernet/marvell/sk*
  
  SLAB ALLOCATOR
  M:	Christoph Lameter <cl at linux-foundation.org>
@@@ -5900,7 -5887,7 +5900,7 @@@ F:	mm/sl?b.
  SMC91x ETHERNET DRIVER
  M:	Nicolas Pitre <nico at fluxnic.net>
  S:	Odd Fixes
 -F:	drivers/net/smc91x.*
 +F:	drivers/net/ethernet/smsc/smc91x.*
  
  SMM665 HARDWARE MONITOR DRIVER
  M:	Guenter Roeck <linux at roeck-us.net>
@@@ -5935,13 -5922,13 +5935,13 @@@ M:	Steve Glendinning <steve.glendinning
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	include/linux/smsc911x.h
 -F:	drivers/net/smsc911x.*
 +F:	drivers/net/ethernet/smsc/smsc911x.*
  
  SMSC9420 PCI ETHERNET DRIVER
  M:	Steve Glendinning <steve.glendinning at smsc.com>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/smsc9420.*
 +F:	drivers/net/ethernet/smsc/smsc9420.*
  
  SN-IA64 (Itanium) SUB-PLATFORM
  M:	Jes Sorensen <jes at sgi.com>
@@@ -5975,7 -5962,7 +5975,7 @@@ SONIC NETWORK DRIVE
  M:	Thomas Bogendoerfer <tsbogend at alpha.franken.de>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/sonic.*
 +F:	drivers/net/ethernet/natsemi/sonic.*
  
  SONICS SILICON BACKPLANE DRIVER (SSB)
  M:	Michael Buesch <m at bues.ch>
@@@ -6116,7 -6103,7 +6116,7 @@@ M:	Jens Osterkamp <jens at de.ibm.com
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	Documentation/networking/spider_net.txt
 -F:	drivers/net/spider_net*
 +F:	drivers/net/ethernet/toshiba/spider_net*
  
  SPU FILE SYSTEM
  M:	Jeremy Kerr <jk at ozlabs.org>
@@@ -6163,6 -6150,12 +6163,6 @@@ M:	Jakub Schmidtke <sjakub at gmail.com
  S:	Odd Fixes
  F:	drivers/staging/asus_oled/
  
 -STAGING - ATHEROS ATH6KL WIRELESS DRIVER
 -M:	Luis R. Rodriguez <mcgrof at gmail.com>
 -M:	Naveen Singh <nsingh at atheros.com>
 -S:	Odd Fixes
 -F:	drivers/staging/ath6kl/
 -
  STAGING - COMEDI
  M:	Ian Abbott <abbotti at mev.co.uk>
  M:	Mori Hess <fmhess at users.sourceforge.net>
@@@ -6288,7 -6281,7 +6288,7 @@@ F:	drivers/staging/xgifb
  STARFIRE/DURALAN NETWORK DRIVER
  M:	Ion Badulescu <ionut at badula.org>
  S:	Odd Fixes
 -F:	drivers/net/starfire*
 +F:	drivers/net/ethernet/adaptec/starfire*
  
  SUN3/3X
  M:	Sam Creasey <sammy at sammy.net>
@@@ -6297,7 -6290,6 +6297,7 @@@ S:	Maintaine
  F:	arch/m68k/kernel/*sun3*
  F:	arch/m68k/sun3*/
  F:	arch/m68k/include/asm/sun3*
 +F:	drivers/net/ethernet/i825xx/sun3*
  
  SUPERH
  M:	Paul Mundt <lethal at linux-sh.org>
@@@ -6382,11 -6374,10 +6382,10 @@@ S:	Supporte
  F:	arch/arm/mach-tegra
  
  TEHUTI ETHERNET DRIVER
  M:	Andy Gospodarek <andy at greyhouse.net>
  L:	netdev at vger.kernel.org
  S:	Supported
 -F:	drivers/net/tehuti*
 +F:	drivers/net/ethernet/tehuti/*
  
  Telecom Clock Driver for MCPL0010
  M:	Mark Gross <mark.gross at intel.com>
@@@ -6437,7 -6428,7 +6436,7 @@@ W:	http://www.tilera.com/scm
  S:	Supported
  F:	arch/tile/
  F:	drivers/tty/hvc/hvc_tile.c
 -F:	drivers/net/tile/
 +F:	drivers/net/ethernet/tile/
  F:	drivers/edac/tile_edac.c
  
  TLAN NETWORK DRIVER
@@@ -6446,7 -6437,7 +6445,7 @@@ L:	tlan-devel at lists.sourceforge.net (su
  W:	http://sourceforge.net/projects/tlan/
  S:	Maintained
  F:	Documentation/networking/tlan.txt
 -F:	drivers/net/tlan.*
 +F:	drivers/net/ethernet/ti/tlan.*
  
  TOMOYO SECURITY MODULE
  M:	Kentaro Takeda <takedakn at nttdata.co.jp>
@@@ -6540,7 -6531,7 +6539,7 @@@ TULIP NETWORK DRIVER
  M:	Grant Grundler <grundler at parisc-linux.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/tulip/
 +F:	drivers/net/ethernet/tulip/
  
  TUN/TAP driver
  M:	Maxim Krasnyansky <maxk at qualcomm.com>
@@@ -6586,7 -6577,7 +6585,7 @@@ W:	http://uclinux-h8.sourceforge.jp
  S:	Supported
  F:	arch/h8300/
  F:	drivers/ide/ide-h8300.c
 -F:	drivers/net/ne-h8300.c
 +F:	drivers/net/ethernet/8390/ne-h8300.c
  
  UDF FILESYSTEM
  M:	Jan Kara <jack at suse.cz>
@@@ -7014,7 -7005,7 +7013,7 @@@ F:	include/linux/vhost.
  VIA RHINE NETWORK DRIVER
  M:	Roger Luethi <rl at hellgate.ch>
  S:	Maintained
 -F:	drivers/net/via-rhine.c
 +F:	drivers/net/ethernet/via/via-rhine.c
  
  VIAPRO SMBUS DRIVER
  M:	Jean Delvare <khali at linux-fr.org>
@@@ -7042,7 -7033,7 +7041,7 @@@ VIA VELOCITY NETWORK DRIVE
  M:	Francois Romieu <romieu at fr.zoreil.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 -F:	drivers/net/via-velocity.*
 +F:	drivers/net/ethernet/via/via-velocity.*
  
  VLAN (802.1Q)
  M:	Patrick McHardy <kaber at trash.net>
diff --combined drivers/net/bonding/bond_main.c
index 1dcb07c,6d79b78..6191e63
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@@ -557,7 -557,7 +557,7 @@@ down
  static int bond_update_speed_duplex(struct slave *slave)
  {
  	struct net_device *slave_dev = slave->dev;
 -	struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET };
 +	struct ethtool_cmd ecmd;
  	u32 slave_speed;
  	int res;
  
@@@ -565,15 -565,18 +565,15 @@@
  	slave->speed = SPEED_100;
  	slave->duplex = DUPLEX_FULL;
  
 -	if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings)
 -		return -1;
 -
 -	res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool);
 +	res = __ethtool_get_settings(slave_dev, &ecmd);
  	if (res < 0)
  		return -1;
  
 -	slave_speed = ethtool_cmd_speed(&etool);
 +	slave_speed = ethtool_cmd_speed(&ecmd);
  	if (slave_speed == 0 || slave_speed == ((__u32) -1))
  		return -1;
  
 -	switch (etool.duplex) {
 +	switch (ecmd.duplex) {
  	case DUPLEX_FULL:
  	case DUPLEX_HALF:
  		break;
@@@ -582,7 -585,7 +582,7 @@@
  	}
  
  	slave->speed = slave_speed;
 -	slave->duplex = etool.duplex;
 +	slave->duplex = ecmd.duplex;
  
  	return 0;
  }
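
The hunk above replaces the open-coded ethtool_ops check with
__ethtool_get_settings(), which performs the NULL-ops/get_settings
check internally. A minimal sketch of the resulting caller pattern,
assuming the 3.1-era in-kernel API (example_link_speed is a made-up
name for illustration):

static int example_link_speed(struct net_device *dev, u32 *speed)
{
	struct ethtool_cmd ecmd;
	int res;

	/* handles drivers without ethtool_ops/get_settings for us */
	res = __ethtool_get_settings(dev, &ecmd);
	if (res < 0)
		return res;

	/* decodes the split speed_hi/speed fields */
	*speed = ethtool_cmd_speed(&ecmd);
	return (*speed == 0 || *speed == (u32)-1) ? -EINVAL : 0;
}
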
@@@ -774,6 -777,9 +774,9 @@@ static void bond_resend_igmp_join_reque
  
  	read_lock(&bond->lock);
  
+ 	if (bond->kill_timers)
+ 		goto out;
+ 
  	/* rejoin all groups on bond device */
  	__bond_resend_igmp_join_requests(bond->dev);
  
@@@ -787,9 -793,9 +790,9 @@@
  			__bond_resend_igmp_join_requests(vlan_dev);
  	}
  
- 	if (--bond->igmp_retrans > 0)
+ 	if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
  		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
- 
+ out:
  	read_unlock(&bond->lock);
  }
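
The kill_timers tests added in these hunks follow a common teardown
pattern for self-re-arming delayed work: the flag is checked under the
bond lock before doing any work and again before re-arming, so a work
item cannot re-queue itself once bond_close() has set kill_timers. A
condensed sketch (field and helper names mirror the driver, the body is
abbreviated for illustration):

static void example_periodic(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	read_lock(&bond->lock);
	if (bond->kill_timers)		/* bond_close() is tearing us down */
		goto out;

	/* ... the periodic work itself ... */

	if (!bond->kill_timers)		/* only re-arm while still open */
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ / 5);
out:
	read_unlock(&bond->lock);
}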
  
@@@ -2535,7 -2541,7 +2538,7 @@@ void bond_mii_monitor(struct work_struc
  	}
  
  re_arm:
- 	if (bond->params.miimon)
+ 	if (bond->params.miimon && !bond->kill_timers)
  		queue_delayed_work(bond->wq, &bond->mii_work,
  				   msecs_to_jiffies(bond->params.miimon));
  out:
@@@ -2883,7 -2889,7 +2886,7 @@@ void bond_loadbalance_arp_mon(struct wo
  	}
  
  re_arm:
- 	if (bond->params.arp_interval)
+ 	if (bond->params.arp_interval && !bond->kill_timers)
  		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
  out:
  	read_unlock(&bond->lock);
@@@ -3151,7 -3157,7 +3154,7 @@@ void bond_activebackup_arp_mon(struct w
  	bond_ab_arp_probe(bond);
  
  re_arm:
- 	if (bond->params.arp_interval)
+ 	if (bond->params.arp_interval && !bond->kill_timers)
  		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
  out:
  	read_unlock(&bond->lock);
@@@ -3701,27 -3707,44 +3704,27 @@@ static bool bond_addr_in_mc_list(unsign
  	return false;
  }
  
 -static void bond_set_multicast_list(struct net_device *bond_dev)
 +static void bond_change_rx_flags(struct net_device *bond_dev, int change)
  {
  	struct bonding *bond = netdev_priv(bond_dev);
 -	struct netdev_hw_addr *ha;
 -	bool found;
 -
 -	/*
 -	 * Do promisc before checking multicast_mode
 -	 */
 -	if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC))
 -		/*
 -		 * FIXME: Need to handle the error when one of the multi-slaves
 -		 * encounters error.
 -		 */
 -		bond_set_promiscuity(bond, 1);
 -
  
 -	if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC))
 -		bond_set_promiscuity(bond, -1);
 +	if (change & IFF_PROMISC)
 +		bond_set_promiscuity(bond,
 +				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
  
 +	if (change & IFF_ALLMULTI)
 +		bond_set_allmulti(bond,
 +				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
 +}
  
 -	/* set allmulti flag to slaves */
 -	if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI))
 -		/*
 -		 * FIXME: Need to handle the error when one of the multi-slaves
 -		 * encounters error.
 -		 */
 -		bond_set_allmulti(bond, 1);
 -
 -
 -	if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI))
 -		bond_set_allmulti(bond, -1);
 -
 +static void bond_set_multicast_list(struct net_device *bond_dev)
 +{
 +	struct bonding *bond = netdev_priv(bond_dev);
 +	struct netdev_hw_addr *ha;
 +	bool found;
  
  	read_lock(&bond->lock);
  
 -	bond->flags = bond_dev->flags;
 -
  	/* looking for addresses to add to slaves' mc list */
  	netdev_for_each_mc_addr(ha, bond_dev) {
  		found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
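
The hunk above splits the old ndo_set_multicast_list responsibilities:
promiscuity and allmulti toggles now arrive via ndo_change_rx_flags,
where the core has already updated dev->flags and passes the bits that
actually flipped in 'change', so the driver no longer needs the cached
bond->flags copy it used to diff against. A generic sketch with
hypothetical foo_* names:

static void foo_change_rx_flags(struct net_device *dev, int change)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (change & IFF_PROMISC)	/* bit flipped: push new state */
		foo_hw_set_promisc(priv, !!(dev->flags & IFF_PROMISC));
	if (change & IFF_ALLMULTI)
		foo_hw_set_allmulti(priv, !!(dev->flags & IFF_ALLMULTI));
}
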
@@@ -4280,8 -4303,7 +4283,8 @@@ static const struct net_device_ops bond
  	.ndo_select_queue	= bond_select_queue,
  	.ndo_get_stats64	= bond_get_stats,
  	.ndo_do_ioctl		= bond_do_ioctl,
 -	.ndo_set_multicast_list	= bond_set_multicast_list,
 +	.ndo_change_rx_flags	= bond_change_rx_flags,
 +	.ndo_set_rx_mode	= bond_set_multicast_list,
  	.ndo_change_mtu		= bond_change_mtu,
  	.ndo_set_mac_address	= bond_set_mac_address,
  	.ndo_neigh_setup	= bond_neigh_setup,
@@@ -4827,20 -4849,11 +4830,20 @@@ static int bond_validate(struct nlattr 
  	return 0;
  }
  
 +static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
 +			      unsigned int *num_queues,
 +			      unsigned int *real_num_queues)
 +{
 +	*num_queues = tx_queues;
 +	return 0;
 +}
 +
  static struct rtnl_link_ops bond_link_ops __read_mostly = {
  	.kind		= "bond",
  	.priv_size	= sizeof(struct bonding),
  	.setup		= bond_setup,
  	.validate	= bond_validate,
 +	.get_tx_queues	= bond_get_tx_queues,
  };
  
  /* Create a new bond based on the specified name and bonding parameters.
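
The get_tx_queues hook added above ties into the 3.1-era rtnl_link_ops
contract: when a bond is created over netlink ("ip link add ... type
bond"), the core asks the hook how many TX queues to allocate before
running the setup routine, so netlink-created bonds honour the
tx_queues module parameter just like module-created ones. Roughly, as
paraphrased (not quoted) from rtnl_create_link() of that era:

	if (ops->get_tx_queues) {
		err = ops->get_tx_queues(src_net, tb, &num_queues,
					 &real_num_queues);
		if (err)
			goto err;
	}
	/* num_queues then feeds the multiqueue netdev allocation */
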
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0b9bd55,0000000..51bd748
mode 100644,000000..100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@@ -1,2510 -1,0 +1,2511 @@@
 +/* bnx2x_dcb.c: Broadcom Everest network driver.
 + *
 + * Copyright 2009-2011 Broadcom Corporation
 + *
 + * Unless you and Broadcom execute a separate written software license
 + * agreement governing use of this software, this software is licensed to you
 + * under the terms of the GNU General Public License version 2, available
 + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 + *
 + * Notwithstanding the above, under no circumstances may you combine this
 + * software in any way with any other Broadcom software provided under a
 + * license other than the GPL, without Broadcom's express prior written
 + * consent.
 + *
 + * Maintained by: Eilon Greenstein <eilong at broadcom.com>
 + * Written by: Dmitry Kravkov
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/netdevice.h>
 +#include <linux/types.h>
 +#include <linux/errno.h>
 +#include <linux/rtnetlink.h>
 +#include <net/dcbnl.h>
 +
 +#include "bnx2x.h"
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_dcb.h"
 +
 +/* forward declarations of dcbx related functions */
 +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 +static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
 +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
 +					  u32 *set_configuration_ets_pg,
 +					  u32 *pri_pg_tbl);
 +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
 +					    u32 *pg_pri_orginal_spread,
 +					    struct pg_help_data *help_data);
 +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
 +				       struct pg_help_data *help_data,
 +				       struct dcbx_ets_feature *ets,
 +				       u32 *pg_pri_orginal_spread);
 +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
 +				struct cos_help_data *cos_data,
 +				u32 *pg_pri_orginal_spread,
 +				struct dcbx_ets_feature *ets);
 +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 +				 struct bnx2x_func_tx_start_params*);
 +
 +/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
 +static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
 +				   u32 addr, u32 len)
 +{
 +	int i;
 +	for (i = 0; i < len; i += 4, buff++)
 +		*buff = REG_RD(bp, addr + i);
 +}
 +
 +static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
 +				    u32 addr, u32 len)
 +{
 +	int i;
 +	for (i = 0; i < len; i += 4, buff++)
 +		REG_WR(bp, addr + i, *buff);
 +}
 +
 +static void bnx2x_pfc_set(struct bnx2x *bp)
 +{
 +	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
 +	u32 pri_bit, val = 0;
 +	int i;
 +
 +	pfc_params.num_of_rx_cos_priority_mask =
 +					bp->dcbx_port_params.ets.num_of_cos;
 +
 +	/* Tx COS configuration */
 +	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
 +		/*
 +		 * We configure only the pauseable bits (non-pauseable aren't
 +		 * configured at all); this is done to avoid false pauses from
 +		 * the network
 +		 */
 +		pfc_params.rx_cos_priority_mask[i] =
 +			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
 +				& DCBX_PFC_PRI_PAUSE_MASK(bp);
 +
 +	/*
 +	 * Rx COS configuration
 +	 * Changing PFC RX configuration.
 +	 * In RX, COS0 will always be configured as lossy and COS1 as lossless
 +	 */
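 +	/* pkt_priority_to_cos packs one 4-bit COS index per priority:
 +	 * nibble i is set to 1 (COS1, lossless) for pauseable priorities
 +	 * and left as 0 (COS0, lossy) otherwise. */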
 +	for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
 +		pri_bit = 1 << i;
 +
 +		if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
 +			val |= 1 << (i * 4);
 +	}
 +
 +	pfc_params.pkt_priority_to_cos = val;
 +
 +	/* RX COS0 */
 +	pfc_params.llfc_low_priority_classes = 0;
 +	/* RX COS1 */
 +	pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
 +
 +	/* BRB configuration */
 +	pfc_params.cos0_pauseable = false;
 +	pfc_params.cos1_pauseable = true;
 +
 +	bnx2x_acquire_phy_lock(bp);
 +	bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
 +	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
 +	bnx2x_release_phy_lock(bp);
 +}
 +
 +static void bnx2x_pfc_clear(struct bnx2x *bp)
 +{
 +	struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
 +	nig_params.pause_enable = 1;
 +#ifdef BNX2X_SAFC
 +	if (bp->flags & SAFC_TX_FLAG) {
 +		u32 high = 0, low = 0;
 +		int i;
 +
 +		for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
 +			if (bp->pri_map[i] == 1)
 +				high |= (1 << i);
 +			if (bp->pri_map[i] == 0)
 +				low |= (1 << i);
 +		}
 +
 +		nig_params.llfc_low_priority_classes = high;
 +		nig_params.llfc_low_priority_classes = low;
 +
 +		nig_params.pause_enable = 0;
 +		nig_params.llfc_enable = 1;
 +		nig_params.llfc_out_en = 1;
 +	}
 +#endif /* BNX2X_SAFC */
 +	bnx2x_acquire_phy_lock(bp);
 +	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
 +	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
 +	bnx2x_release_phy_lock(bp);
 +}
 +
 +static void  bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
 +				       struct dcbx_features *features,
 +				       u32 error)
 +{
 +	u8 i = 0;
 +	DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
 +
 +	/* PG */
 +	DP(NETIF_MSG_LINK,
 +	   "local_mib.features.ets.enabled %x\n", features->ets.enabled);
 +	for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
 +		DP(NETIF_MSG_LINK,
 +		   "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
 +		   DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
 +	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
 +		DP(NETIF_MSG_LINK,
 +		   "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
 +		   DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
 +
 +	/* pfc */
 +	DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
 +					features->pfc.pri_en_bitmap);
 +	DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
 +					features->pfc.pfc_caps);
 +	DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
 +					features->pfc.enabled);
 +
 +	DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
 +					features->app.default_pri);
 +	DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
 +					features->app.tc_supported);
 +	DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
 +					features->app.enabled);
 +	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
 +		DP(NETIF_MSG_LINK,
 +		   "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
 +		   i, features->app.app_pri_tbl[i].app_id);
 +		DP(NETIF_MSG_LINK,
 +		   "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
 +		   i, features->app.app_pri_tbl[i].pri_bitmap);
 +		DP(NETIF_MSG_LINK,
 +		   "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
 +		   i, features->app.app_pri_tbl[i].appBitfield);
 +	}
 +}
 +
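 +/*
 + * Worked example for the scan below: with pri_bitmap 0x28 (priorities
 + * 3 and 5 set) the downward scan stops at 5, i.e. the highest
 + * advertised priority wins; max_t() then ensures the traffic type's
 + * ttp[] entry only ever moves up.
 + */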
 +static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
 +				       u8 pri_bitmap,
 +				       u8 llfc_traf_type)
 +{
 +	u32 pri = MAX_PFC_PRIORITIES;
 +	u32 index = MAX_PFC_PRIORITIES - 1;
 +	u32 pri_mask;
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +
 +	/* Choose the highest priority */
 +	while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
 +		pri_mask = 1 << index;
 +		if (GET_FLAGS(pri_bitmap, pri_mask))
 +			pri = index;
 +		index--;
 +	}
 +
 +	if (pri < MAX_PFC_PRIORITIES)
 +		ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
 +}
 +
 +static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
 +				   struct dcbx_app_priority_feature *app,
 +				   u32 error) {
 +	u8 index;
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +
 +	if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
 +
 +	if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
 +
 +	if (app->enabled &&
 +	    !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
 +
 +		bp->dcbx_port_params.app.enabled = true;
 +
 +		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
 +			ttp[index] = 0;
 +
 +		if (app->default_pri < MAX_PFC_PRIORITIES)
 +			ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
 +
 +		for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
 +			struct dcbx_app_priority_entry *entry =
 +							app->app_pri_tbl;
 +
 +			if (GET_FLAGS(entry[index].appBitfield,
 +				     DCBX_APP_SF_ETH_TYPE) &&
 +			   ETH_TYPE_FCOE == entry[index].app_id)
 +				bnx2x_dcbx_get_ap_priority(bp,
 +						entry[index].pri_bitmap,
 +						LLFC_TRAFFIC_TYPE_FCOE);
 +
 +			if (GET_FLAGS(entry[index].appBitfield,
 +				     DCBX_APP_SF_PORT) &&
 +			   TCP_PORT_ISCSI == entry[index].app_id)
 +				bnx2x_dcbx_get_ap_priority(bp,
 +						entry[index].pri_bitmap,
 +						LLFC_TRAFFIC_TYPE_ISCSI);
 +		}
 +	} else {
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
 +		bp->dcbx_port_params.app.enabled = false;
 +		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
 +			ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
 +	}
 +}
 +
 +static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
 +				       struct dcbx_ets_feature *ets,
 +				       u32 error) {
 +	int i = 0;
 +	u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
 +	struct pg_help_data pg_help_data;
 +	struct bnx2x_dcbx_cos_params *cos_params =
 +			bp->dcbx_port_params.ets.cos_params;
 +
 +	memset(&pg_help_data, 0, sizeof(struct pg_help_data));
 +
 +
 +	if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
 +
 +
 +	/* Clean up old settings of ets on COS */
 +	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
 +		cos_params[i].pauseable = false;
 +		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
 +		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
 +		cos_params[i].pri_bitmask = 0;
 +	}
 +
 +	if (bp->dcbx_port_params.app.enabled &&
 +	   !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
 +	   ets->enabled) {
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
 +		bp->dcbx_port_params.ets.enabled = true;
 +
 +		bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
 +					      pg_pri_orginal_spread,
 +					      ets->pri_pg_tbl);
 +
 +		bnx2x_dcbx_get_num_pg_traf_type(bp,
 +						pg_pri_orginal_spread,
 +						&pg_help_data);
 +
 +		bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
 +					   ets, pg_pri_orginal_spread);
 +
 +	} else {
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
 +		bp->dcbx_port_params.ets.enabled = false;
 +		ets->pri_pg_tbl[0] = 0;
 +
 +		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
 +			DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
 +	}
 +}
 +
 +static void  bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
 +					struct dcbx_pfc_feature *pfc, u32 error)
 +{
 +
 +	if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
 +
 +	if (bp->dcbx_port_params.app.enabled &&
 +	   !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
 +	   pfc->enabled) {
 +		bp->dcbx_port_params.pfc.enabled = true;
 +		bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
 +			~(pfc->pri_en_bitmap);
 +	} else {
 +		DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
 +		bp->dcbx_port_params.pfc.enabled = false;
 +		bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
 +	}
 +}
 +
 +/* maps unmapped priorities to the same COS as L2 */
 +static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
 +{
 +	int i;
 +	u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +	u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
 +	struct bnx2x_dcbx_cos_params *cos_params =
 +			bp->dcbx_port_params.ets.cos_params;
 +
 +	/* get unmapped priorities by clearing mapped bits */
 +	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
 +		unmapped &= ~(1 << ttp[i]);
 +
 +	/* find cos for nw prio and extend it with unmapped */
 +	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
 +		if (cos_params[i].pri_bitmask & nw_prio) {
 +			/* extend the bitmask with unmapped */
 +			DP(NETIF_MSG_LINK,
 +			   "cos %d extended with 0x%08x\n", i, unmapped);
 +			cos_params[i].pri_bitmask |= unmapped;
 +			break;
 +		}
 +	}
 +}
 +
 +static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
 +				     struct dcbx_features *features,
 +				     u32 error)
 +{
 +	bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
 +
 +	bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
 +
 +	bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
 +
 +	bnx2x_dcbx_map_nw(bp);
 +}
 +
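 +/*
 + * The MIB read below relies on a seqlock-style consistency check: the
 + * writer wraps each MIB update with matching prefix/suffix sequence
 + * numbers, so a read observing prefix != suffix was torn by a
 + * concurrent update and is retried, bounded by
 + * DCBX_LOCAL_MIB_MAX_TRY_READ.
 + */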
 +#define DCBX_LOCAL_MIB_MAX_TRY_READ		(100)
 +static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 +			       u32 *base_mib_addr,
 +			       u32 offset,
 +			       int read_mib_type)
 +{
 +	int max_try_read = 0;
 +	u32 mib_size, prefix_seq_num, suffix_seq_num;
 +	struct lldp_remote_mib *remote_mib;
 +	struct lldp_local_mib  *local_mib;
 +
 +
 +	switch (read_mib_type) {
 +	case DCBX_READ_LOCAL_MIB:
 +		mib_size = sizeof(struct lldp_local_mib);
 +		break;
 +	case DCBX_READ_REMOTE_MIB:
 +		mib_size = sizeof(struct lldp_remote_mib);
 +		break;
 +	default:
 +		return 1; /*error*/
 +	}
 +
 +	offset += BP_PORT(bp) * mib_size;
 +
 +	do {
 +		bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
 +
 +		max_try_read++;
 +
 +		switch (read_mib_type) {
 +		case DCBX_READ_LOCAL_MIB:
 +			local_mib = (struct lldp_local_mib *) base_mib_addr;
 +			prefix_seq_num = local_mib->prefix_seq_num;
 +			suffix_seq_num = local_mib->suffix_seq_num;
 +			break;
 +		case DCBX_READ_REMOTE_MIB:
 +			remote_mib = (struct lldp_remote_mib *) base_mib_addr;
 +			prefix_seq_num = remote_mib->prefix_seq_num;
 +			suffix_seq_num = remote_mib->suffix_seq_num;
 +			break;
 +		default:
 +			return 1; /*error*/
 +		}
 +	} while ((prefix_seq_num != suffix_seq_num) &&
 +	       (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
 +
 +	if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
 +		BNX2X_ERR("MIB could not be read\n");
 +		return 1;
 +	}
 +
 +	return 0;
 +}
 +
 +static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 +{
 +	if (bp->dcbx_port_params.pfc.enabled &&
 +	    !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
 +		/*
 +		 * 1. Fills up common PFC structures if required
 +		 * 2. Configure NIG, MAC and BRB via the elink
 +		 */
 +		bnx2x_pfc_set(bp);
 +	else
 +		bnx2x_pfc_clear(bp);
 +}
 +
 +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 +{
 +	struct bnx2x_func_state_params func_params = {0};
 +
 +	func_params.f_obj = &bp->func_obj;
 +	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 +
 +	DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
 +	return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 +{
 +	struct bnx2x_func_state_params func_params = {0};
 +	struct bnx2x_func_tx_start_params *tx_params =
 +		&func_params.params.tx_start;
 +
 +	func_params.f_obj = &bp->func_obj;
 +	func_params.cmd = BNX2X_F_CMD_TX_START;
 +
 +	bnx2x_dcbx_fw_struct(bp, tx_params);
 +
 +	DP(NETIF_MSG_LINK, "START TRAFFIC\n");
 +	return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
 +{
 +	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
 +	int rc = 0;
 +
 +	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
 +		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
 +		return;
 +	}
 +
 +	/* valid COS entries */
 +	if (ets->num_of_cos == 1)   /* no ETS */
 +		return;
 +
 +	/* sanity */
 +	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
 +	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
 +	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
 +	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
 +		BNX2X_ERR("all COS should have at least bw_limit or strict "
 +			    "ets->cos_params[0].strict= %x "
 +			    "ets->cos_params[0].bw_tbl= %x "
 +			    "ets->cos_params[1].strict= %x "
 +			    "ets->cos_params[1].bw_tbl= %x\n",
 +			  ets->cos_params[0].strict,
 +			  ets->cos_params[0].bw_tbl,
 +			  ets->cos_params[1].strict,
 +			  ets->cos_params[1].bw_tbl);
 +		return;
 +	}
 +	/* If we join a group and there is bw_tbl and strict then bw rules */
 +	if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
 +	    (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
 +		u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
 +		u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
 +		/* Do not allow 0-100 configuration
 +		 * since PBF does not support it
 +		 * force 1-99 instead
 +		 */
 +		if (bw_tbl_0 == 0) {
 +			bw_tbl_0 = 1;
 +			bw_tbl_1 = 99;
 +		} else if (bw_tbl_1 == 0) {
 +			bw_tbl_1 = 1;
 +			bw_tbl_0 = 99;
 +		}
 +
 +		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
 +	} else {
 +		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
 +			rc = bnx2x_ets_strict(&bp->link_params, 0);
 +		else if (ets->cos_params[1].strict
 +					== BNX2X_DCBX_STRICT_COS_HIGHEST)
 +			rc = bnx2x_ets_strict(&bp->link_params, 1);
 +		if (rc)
 +			BNX2X_ERR("update_ets_params failed\n");
 +	}
 +}
 +
 +/*
 + * In E3B0 the configuration may have more than 2 COS.
 + */
 +void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
 +{
 +	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
 +	struct bnx2x_ets_params ets_params = { 0 };
 +	u8 i;
 +
 +	ets_params.num_of_cos = ets->num_of_cos;
 +
 +	for (i = 0; i < ets->num_of_cos; i++) {
 +		/* COS is SP */
 +		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
 +			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
 +				BNX2X_ERR("COS can't be both BW and SP\n");
 +				return;
 +			}
 +
 +			ets_params.cos[i].state = bnx2x_cos_state_strict;
 +			ets_params.cos[i].params.sp_params.pri =
 +						ets->cos_params[i].strict;
 +		} else { /* COS is BW */
 +			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
 +				BNX2X_ERR("COS must be either BW or SP\n");
 +				return;
 +			}
 +			ets_params.cos[i].state = bnx2x_cos_state_bw;
 +			ets_params.cos[i].params.bw_params.bw =
 +						(u8)ets->cos_params[i].bw_tbl;
 +		}
 +	}
 +
 +	/* Configure the ETS in HW */
 +	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
 +				  &ets_params)) {
 +		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
 +		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
 +	}
 +}
 +
 +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
 +{
 +	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
 +
 +	if (!bp->dcbx_port_params.ets.enabled ||
 +	    (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
 +		return;
 +
 +	if (CHIP_IS_E3B0(bp))
 +		bnx2x_dcbx_update_ets_config(bp);
 +	else
 +		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
 +}
 +
 +#ifdef BCM_DCBNL
 +static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
 +{
 +	struct lldp_remote_mib remote_mib = {0};
 +	u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
 +	int rc;
 +
 +	DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
 +	   dcbx_remote_mib_offset);
 +
 +	if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
 +		BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
 +		return -EINVAL;
 +	}
 +
 +	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
 +				 DCBX_READ_REMOTE_MIB);
 +
 +	if (rc) {
 +		BNX2X_ERR("Failed to read remote mib from FW\n");
 +		return rc;
 +	}
 +
 +	/* save features and flags */
 +	bp->dcbx_remote_feat = remote_mib.features;
 +	bp->dcbx_remote_flags = remote_mib.flags;
 +	return 0;
 +}
 +#endif
 +
 +static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
 +{
 +	struct lldp_local_mib local_mib = {0};
 +	u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
 +	int rc;
 +
 +	DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
 +
 +	if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
 +		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
 +		return -EINVAL;
 +	}
 +
 +	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
 +				 DCBX_READ_LOCAL_MIB);
 +
 +	if (rc) {
 +		BNX2X_ERR("Failed to read local mib from FW\n");
 +		return rc;
 +	}
 +
 +	/* save features and error */
 +	bp->dcbx_local_feat = local_mib.features;
 +	bp->dcbx_error = local_mib.error;
 +	return 0;
 +}
 +
 +
 +#ifdef BCM_DCBNL
 +static inline
 +u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
 +{
 +	u8 pri;
 +
 +	/* Choose the highest priority */
 +	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
 +		if (ent->pri_bitmap & (1 << pri))
 +			break;
 +	return pri;
 +}
 +
 +static inline
 +u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
 +{
 +	return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
 +		DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
 +		DCB_APP_IDTYPE_ETHTYPE;
 +}
 +
 +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 +{
 +	int i, err = 0;
 +
 +	for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
 +		struct dcbx_app_priority_entry *ent =
 +			&bp->dcbx_local_feat.app.app_pri_tbl[i];
 +
 +		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
 +			u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
 +
 +			/* avoid invalid user-priority */
 +			if (up) {
 +				struct dcb_app app;
 +				app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
 +				app.protocol = ent->app_id;
 +				app.priority = delall ? 0 : up;
 +				err = dcb_setapp(bp->dev, &app);
 +			}
 +		}
 +	}
 +	return err;
 +}
 +#endif
 +
 +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
 +{
 +	if (SHMEM2_HAS(bp, drv_flags)) {
 +		u32 drv_flags;
 +		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
 +		drv_flags = SHMEM2_RD(bp, drv_flags);
 +
 +		if (set)
 +			SET_FLAGS(drv_flags, flags);
 +		else
 +			RESET_FLAGS(drv_flags, flags);
 +
 +		SHMEM2_WR(bp, drv_flags, drv_flags);
 +		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
 +		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
 +	}
 +}
 +
 +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 +{
 +	u8 prio, cos;
 +	for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
 +		for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
 +			if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
 +			    & (1 << prio)) {
 +				bp->prio_to_cos[prio] = cos;
 +				DP(NETIF_MSG_LINK,
 +				   "tx_mapping %d --> %d\n", prio, cos);
 +			}
 +		}
 +	}
 +
 +	/* setup tc must be called under rtnl lock, but we can't take it here
 +	 * as we are handling an attention on a work queue which must be
 +	 * flushed at some rtnl-locked contexts (e.g. if down)
 +	 */
 +	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 +		schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +}
 +
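 +/*
 + * Overview of the attention-driven DCBX flow handled below:
 + * NEG_RECEIVED reads the negotiated MIBs and stops HW TX, TX_PAUSED
 + * programs the new PFC/ETS settings and resumes TX, and TX_RELEASED
 + * acks the MCP (and, when BCM_DCBNL is set, notifies dcbnl).
 + */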
 +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 +{
 +	switch (state) {
 +	case BNX2X_DCBX_STATE_NEG_RECEIVED:
 +		{
 +			DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
 +#ifdef BCM_DCBNL
 +			/**
 +			 * Delete app tlvs from dcbnl before reading new
 +			 * negotiation results
 +			 */
 +			bnx2x_dcbnl_update_applist(bp, true);
 +
 +			/* Read remote mib if dcbx is in the FW */
 +			if (bnx2x_dcbx_read_shmem_remote_mib(bp))
 +				return;
 +#endif
 +			/* Read neg results if dcbx is in the FW */
 +			if (bnx2x_dcbx_read_shmem_neg_results(bp))
 +				return;
 +
 +			bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +						  bp->dcbx_error);
 +
 +			bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +						 bp->dcbx_error);
 +
 +			/* mark DCBX result for PMF migration */
 +			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 +#ifdef BCM_DCBNL
 +			/* Add new app tlvs to dcbnl */
 +			bnx2x_dcbnl_update_applist(bp, false);
 +#endif
 +			bnx2x_dcbx_stop_hw_tx(bp);
 +
 +			/* reconfigure the netdevice with the results of the new
 +			 * dcbx negotiation.
 +			 */
 +			bnx2x_dcbx_update_tc_mapping(bp);
 +
 +			return;
 +		}
 +	case BNX2X_DCBX_STATE_TX_PAUSED:
 +		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
 +		bnx2x_pfc_set_pfc(bp);
 +
 +		bnx2x_dcbx_update_ets_params(bp);
 +		bnx2x_dcbx_resume_hw_tx(bp);
 +		return;
 +	case BNX2X_DCBX_STATE_TX_RELEASED:
 +		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
 +		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
 +#ifdef BCM_DCBNL
 +		/*
 +		 * Send a notification for the new negotiated parameters
 +		 */
 +		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
 +#endif
 +		return;
 +	default:
 +		BNX2X_ERR("Unknown DCBX_STATE\n");
 +	}
 +}
 +
 +#define LLDP_ADMIN_MIB_OFFSET(bp)	(PORT_MAX*sizeof(struct lldp_params) + \
 +				      BP_PORT(bp)*sizeof(struct lldp_admin_mib))
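 +
 +/* Layout sketch (assuming PORT_MAX is 2): the LLDP region holds one
 + * struct lldp_params per port followed by one struct lldp_admin_mib
 + * per port, so port 1 finds its admin MIB at
 + * 2 * sizeof(struct lldp_params) + 1 * sizeof(struct lldp_admin_mib)
 + * past dcbx_lldp_params_offset.
 + */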
 +
 +static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 +				u32 dcbx_lldp_params_offset)
 +{
 +	struct lldp_admin_mib admin_mib;
 +	u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
 +	u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
 +
 +	/* shortcuts */
 +	struct dcbx_features *af = &admin_mib.features;
 +	struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
 +
 +	memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
 +
 +	/* Read the data first */
 +	bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
 +			sizeof(struct lldp_admin_mib));
 +
 +	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
 +		SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
 +	else
 +		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
 +
 +	if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
 +
 +		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
 +		admin_mib.ver_cfg_flags |=
 +			(dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
 +			 DCBX_CEE_VERSION_MASK;
 +
 +		af->ets.enabled = (u8)dp->admin_ets_enable;
 +
 +		af->pfc.enabled = (u8)dp->admin_pfc_enable;
 +
 +		/* FOR IEEE dp->admin_tc_supported_tx_enable */
 +		if (dp->admin_ets_configuration_tx_enable)
 +			SET_FLAGS(admin_mib.ver_cfg_flags,
 +				  DCBX_ETS_CONFIG_TX_ENABLED);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags,
 +				    DCBX_ETS_CONFIG_TX_ENABLED);
 +		/* For IEEE admin_ets_recommendation_tx_enable */
 +		if (dp->admin_pfc_tx_enable)
 +			SET_FLAGS(admin_mib.ver_cfg_flags,
 +				  DCBX_PFC_CONFIG_TX_ENABLED);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags,
 +				  DCBX_PFC_CONFIG_TX_ENABLED);
 +
 +		if (dp->admin_application_priority_tx_enable)
 +			SET_FLAGS(admin_mib.ver_cfg_flags,
 +				  DCBX_APP_CONFIG_TX_ENABLED);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags,
 +				  DCBX_APP_CONFIG_TX_ENABLED);
 +
 +		if (dp->admin_ets_willing)
 +			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
 +		/* For IEEE admin_ets_reco_valid */
 +		if (dp->admin_pfc_willing)
 +			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
 +
 +		if (dp->admin_app_priority_willing)
 +			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
 +		else
 +			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
 +
 +		for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
 +			DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
 +				(u8)dp->admin_configuration_bw_precentage[i]);
 +
 +			DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
 +			   i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
 +		}
 +
 +		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
 +			DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
 +					(u8)dp->admin_configuration_ets_pg[i]);
 +
 +			DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
 +			   i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
 +		}
 +
 +		/* For IEEE admin_recommendation_bw_precentage
 +		 * For IEEE admin_recommendation_ets_pg */
 +		af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
 +		for (i = 0; i < 4; i++) {
 +			if (dp->admin_priority_app_table[i].valid) {
 +				struct bnx2x_admin_priority_app_table *table =
 +					dp->admin_priority_app_table;
 +				if ((ETH_TYPE_FCOE == table[i].app_id) &&
 +				   (TRAFFIC_TYPE_ETH == table[i].traffic_type))
 +					traf_type = FCOE_APP_IDX;
 +				else if ((TCP_PORT_ISCSI == table[i].app_id) &&
 +				   (TRAFFIC_TYPE_PORT == table[i].traffic_type))
 +					traf_type = ISCSI_APP_IDX;
 +				else
 +					traf_type = other_traf_type++;
 +
 +				af->app.app_pri_tbl[traf_type].app_id =
 +					table[i].app_id;
 +
 +				af->app.app_pri_tbl[traf_type].pri_bitmap =
 +					(u8)(1 << table[i].priority);
 +
 +				af->app.app_pri_tbl[traf_type].appBitfield =
 +				    (DCBX_APP_ENTRY_VALID);
 +
 +				af->app.app_pri_tbl[traf_type].appBitfield |=
 +				   (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
 +					DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
 +			}
 +		}
 +
 +		af->app.default_pri = (u8)dp->admin_default_priority;
 +
 +	}
 +
 +	/* Write the data. */
 +	bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
 +			 sizeof(struct lldp_admin_mib));
 +
 +}
 +
 +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 +{
 +	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
 +		bp->dcb_state = dcb_on;
 +		bp->dcbx_enabled = dcbx_enabled;
 +	} else {
 +		bp->dcb_state = false;
 +		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
 +	}
 +	DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
 +	   dcb_on ? "ON" : "OFF",
 +	   dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
 +	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
 +	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
 +	   "on-chip with negotiation" : "invalid");
 +}
 +
 +void bnx2x_dcbx_init_params(struct bnx2x *bp)
 +{
 +	bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
 +	bp->dcbx_config_params.admin_ets_willing = 1;
 +	bp->dcbx_config_params.admin_pfc_willing = 1;
 +	bp->dcbx_config_params.overwrite_settings = 1;
 +	bp->dcbx_config_params.admin_ets_enable = 1;
 +	bp->dcbx_config_params.admin_pfc_enable = 1;
 +	bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
 +	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
 +	bp->dcbx_config_params.admin_pfc_tx_enable = 1;
 +	bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
 +	bp->dcbx_config_params.admin_ets_reco_valid = 1;
 +	bp->dcbx_config_params.admin_app_priority_willing = 1;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
 +	bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
 +	bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
 +	bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
 +	bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
 +	bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
 +	bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
 +	bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
 +	bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
 +	bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
 +	bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
 +	bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
 +	bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
 +	bp->dcbx_config_params.admin_default_priority =
 +		bp->dcbx_config_params.admin_priority_app_table[1].priority;
 +}
 +
 +void bnx2x_dcbx_init(struct bnx2x *bp)
 +{
 +	u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
 +
 +	if (bp->dcbx_enabled <= 0)
 +		return;
 +
 +	/* validate:
 +	 * - the chip is good for this dcbx version,
 +	 * - dcb is wanted,
 +	 * - the function is pmf,
 +	 * - shmem2 contains the DCBX support fields
 +	 */
 +	DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
 +	   bp->dcb_state, bp->port.pmf);
 +
 +	if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
 +	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
 +		dcbx_lldp_params_offset =
 +			SHMEM2_RD(bp, dcbx_lldp_params_offset);
 +
 +		DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
 +		   dcbx_lldp_params_offset);
 +
 +		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
 +
 +		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
 +			bnx2x_dcbx_admin_mib_updated_params(bp,
 +				dcbx_lldp_params_offset);
 +
 +			/* Let HW start negotiation */
 +			bnx2x_fw_command(bp,
 +					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
 +		}
 +	}
 +}
 +
 +static void
 +bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
 +			    struct bnx2x_func_tx_start_params *pfc_fw_cfg)
 +{
 +	u8 pri = 0;
 +	u8 cos = 0;
 +
 +	DP(NETIF_MSG_LINK,
 +	   "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
 +	DP(NETIF_MSG_LINK,
 +	   "pdev->params.dcbx_port_params.pfc."
 +	   "priority_non_pauseable_mask %x\n",
 +	   bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
 +
 +	for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
 +		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
 +		   "cos_params[%d].pri_bitmask %x\n", cos,
 +		   bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
 +
 +		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
 +		   "cos_params[%d].bw_tbl %x\n", cos,
 +		   bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
 +
 +		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
 +		   "cos_params[%d].strict %x\n", cos,
 +		   bp->dcbx_port_params.ets.cos_params[cos].strict);
 +
 +		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
 +		   "cos_params[%d].pauseable %x\n", cos,
 +		   bp->dcbx_port_params.ets.cos_params[cos].pauseable);
 +	}
 +
 +	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
 +		DP(NETIF_MSG_LINK,
 +		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
 +		   "priority %x\n", pri,
 +		   pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
 +
 +		DP(NETIF_MSG_LINK,
 +		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
 +		   pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
 +	}
 +}
 +
 +/* fills help_data according to pg_info */
 +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
 +					    u32 *pg_pri_orginal_spread,
 +					    struct pg_help_data *help_data)
 +{
 +	bool pg_found  = false;
 +	u32 i, traf_type, add_traf_type, add_pg;
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +	struct pg_entry_help_data *data = help_data->data; /* shortcut */
 +
 +	/* Set to invalid */
 +	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
 +		data[i].pg = DCBX_ILLEGAL_PG;
 +
 +	for (add_traf_type = 0;
 +	     add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
 +		pg_found = false;
 +		if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
 +			add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
 +			for (traf_type = 0;
 +			     traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
 +			     traf_type++) {
 +				if (data[traf_type].pg == add_pg) {
 +					if (!(data[traf_type].pg_priority &
 +					     (1 << ttp[add_traf_type])))
 +						data[traf_type].
 +							num_of_dif_pri++;
 +					data[traf_type].pg_priority |=
 +						(1 << ttp[add_traf_type]);
 +					pg_found = true;
 +					break;
 +				}
 +			}
 +			if (false == pg_found) {
 +				data[help_data->num_of_pg].pg = add_pg;
 +				data[help_data->num_of_pg].pg_priority =
 +						(1 << ttp[add_traf_type]);
 +				data[help_data->num_of_pg].num_of_dif_pri = 1;
 +				help_data->num_of_pg++;
 +			}
 +		}
 +		DP(NETIF_MSG_LINK,
 +		   "add_traf_type %d pg_found %s num_of_pg %d\n",
 +		   add_traf_type, (false == pg_found) ? "NO" : "YES",
 +		   help_data->num_of_pg);
 +	}
 +}
 +
 +static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
 +					       struct cos_help_data *cos_data,
 +					       u32 pri_join_mask)
 +{
 +	/* Only one priority, hence only one COS */
 +	cos_data->data[0].pausable =
 +		IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
 +	cos_data->data[0].pri_join_mask = pri_join_mask;
 +	cos_data->data[0].cos_bw = 100;
 +	cos_data->num_of_cos = 1;
 +}
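 +
 +/* Degenerate case illustration: with ETS effectively disabled, all
 + * priorities in pri_join_mask share a single COS that receives 100%
 + * of the bandwidth; pausability follows whether the mask contains
 + * only pauseable priorities.
 + */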
 +
 +static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
 +					    struct cos_entry_help_data *data,
 +					    u8 pg_bw)
 +{
 +	if (data->cos_bw == DCBX_INVALID_COS_BW)
 +		data->cos_bw = pg_bw;
 +	else
 +		data->cos_bw += pg_bw;
 +}
 +
 +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
 +			struct cos_help_data *cos_data,
 +			u32 *pg_pri_orginal_spread,
 +			struct dcbx_ets_feature *ets)
 +{
 +	u32	pri_tested	= 0;
 +	u8	i		= 0;
 +	u8	entry		= 0;
 +	u8	pg_entry	= 0;
 +	u8	num_of_pri	= LLFC_DRIVER_TRAFFIC_TYPE_MAX;
 +
 +	cos_data->data[0].pausable = true;
 +	cos_data->data[1].pausable = false;
 +	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
 +
 +	for (i = 0 ; i < num_of_pri ; i++) {
 +		pri_tested = 1 << bp->dcbx_port_params.
 +					app.traffic_type_priority[i];
 +
 +		if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
 +			cos_data->data[1].pri_join_mask |= pri_tested;
 +			entry = 1;
 +		} else {
 +			cos_data->data[0].pri_join_mask |= pri_tested;
 +			entry = 0;
 +		}
 +		pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
 +						app.traffic_type_priority[i]];
 +		/* There can be only one strict pg */
 +		if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
 +			bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
 +				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
 +		else
 +			/* If we join a group and one is strict
 +			 * then the group is strict and the bw is ignored */
 +			cos_data->data[entry].strict =
 +						BNX2X_DCBX_STRICT_COS_HIGHEST;
 +	}
 +	if ((0 == cos_data->data[0].pri_join_mask) &&
 +	    (0 == cos_data->data[1].pri_join_mask))
 +		BNX2X_ERR("dcbx error: Both groups must have priorities\n");
 +}
 +
 +
 +#ifndef POWER_OF_2
 +#define POWER_OF_2(x)	((0 != x) && (0 == (x & (x-1))))
 +#endif
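 +
 +/* POWER_OF_2() illustration: 4 & 3 == 0, so POWER_OF_2(4) is true,
 + * while 6 & 5 == 4, so POWER_OF_2(6) is false; 0 is rejected by the
 + * first clause. Note that x is not parenthesized, so callers must
 + * pass a simple expression.
 + */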
 +
 +static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
 +					      struct pg_help_data *pg_help_data,
 +					      struct cos_help_data *cos_data,
 +					      u32 pri_join_mask,
 +					      u8 num_of_dif_pri)
 +{
 +	u8 i = 0;
 +	u32 pri_tested = 0;
 +	u32 pri_mask_without_pri = 0;
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +	/*debug*/
 +	if (num_of_dif_pri == 1) {
 +		bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
 +		return;
 +	}
 +	/* single priority group */
 +	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
 +		/* If there are both pauseable and non-pauseable priorities,
 +		 * the pauseable priorities go to the first queue and
 +		 * the non-pauseable priorities go to the second queue.
 +		 */
 +		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
 +			/* Pauseable */
 +			cos_data->data[0].pausable = true;
 +			/* Non pauseable.*/
 +			cos_data->data[1].pausable = false;
 +
 +			if (2 == num_of_dif_pri) {
 +				cos_data->data[0].cos_bw = 50;
 +				cos_data->data[1].cos_bw = 50;
 +			}
 +
 +			if (3 == num_of_dif_pri) {
 +				if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
 +							pri_join_mask))) {
 +					cos_data->data[0].cos_bw = 33;
 +					cos_data->data[1].cos_bw = 67;
 +				} else {
 +					cos_data->data[0].cos_bw = 67;
 +					cos_data->data[1].cos_bw = 33;
 +				}
 +			}
 +
 +		} else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
 +			/* If there are only pauseable priorities,
 +			 * then one/two priorities go to the first queue
 +			 * and one priority goes to the second queue.
 +			 */
 +			if (2 == num_of_dif_pri) {
 +				cos_data->data[0].cos_bw = 50;
 +				cos_data->data[1].cos_bw = 50;
 +			} else {
 +				cos_data->data[0].cos_bw = 67;
 +				cos_data->data[1].cos_bw = 33;
 +			}
 +			cos_data->data[1].pausable = true;
 +			cos_data->data[0].pausable = true;
 +			/* All priorities except FCOE */
 +			cos_data->data[0].pri_join_mask = (pri_join_mask &
 +				((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
 +			/* Only FCOE priority.*/
 +			cos_data->data[1].pri_join_mask =
 +				(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
 +		} else
 +			/* If there are only non-pauseable priorities,
 +			 * they will all go to the same queue.
 +			 */
 +			bnx2x_dcbx_ets_disabled_entry_data(bp,
 +						cos_data, pri_join_mask);
 +	} else {
 +		/* priority group which is not BW limited (PG#15):*/
 +		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
 +			/* If there are both pauseable and non-pauseable
 +			 * priorities, the pauseable priorities go to the first
 +			 * queue and the non-pauseable priorities
 +			 * go to the second queue.
 +			 */
 +			if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
 +			    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
 +				cos_data->data[0].strict =
 +					BNX2X_DCBX_STRICT_COS_HIGHEST;
 +				cos_data->data[1].strict =
 +					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
 +						BNX2X_DCBX_STRICT_COS_HIGHEST);
 +			} else {
 +				cos_data->data[0].strict =
 +					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
 +						BNX2X_DCBX_STRICT_COS_HIGHEST);
 +				cos_data->data[1].strict =
 +					BNX2X_DCBX_STRICT_COS_HIGHEST;
 +			}
 +			/* Pauseable */
 +			cos_data->data[0].pausable = true;
 +			/* Non pauseable. */
 +			cos_data->data[1].pausable = false;
 +		} else {
 +			/* If there are only pauseable priorities or
 +			 * only non-pauseable, the lower priorities go
 +			 * to the first queue and the higher priorities go
 +			 * to the second queue.
 +			 */
 +			cos_data->data[0].pausable =
 +				cos_data->data[1].pausable =
 +				IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
 +
 +			for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
 +				pri_tested = 1 << bp->dcbx_port_params.
 +					app.traffic_type_priority[i];
 +				/* Remove priority tested */
 +				pri_mask_without_pri =
 +					(pri_join_mask & ((u8)(~pri_tested)));
 +				if (pri_mask_without_pri < pri_tested)
 +					break;
 +			}
 +
 +			if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
 +				BNX2X_ERR("Invalid value for pri_join_mask -"
 +					  " could not find a priority\n");
 +
 +			cos_data->data[0].pri_join_mask = pri_mask_without_pri;
 +			cos_data->data[1].pri_join_mask = pri_tested;
 +			/* Both queues are strict priority,
 +			 * and that with the highest priority
 +			 * gets the highest strict priority in the arbiter.
 +			 */
 +			cos_data->data[0].strict =
 +					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
 +						BNX2X_DCBX_STRICT_COS_HIGHEST);
 +			cos_data->data[1].strict =
 +					BNX2X_DCBX_STRICT_COS_HIGHEST;
 +		}
 +	}
 +}
 +
 +static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
 +			    struct bnx2x		*bp,
 +			    struct  pg_help_data	*pg_help_data,
 +			    struct dcbx_ets_feature	*ets,
 +			    struct cos_help_data	*cos_data,
 +			    u32			*pg_pri_orginal_spread,
 +			    u32				pri_join_mask,
 +			    u8				num_of_dif_pri)
 +{
 +	u8 i = 0;
 +	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
 +
 +	/* If there are both pauseable and non-pauseable priorities,
 +	 * the pauseable priorities go to the first queue and
 +	 * the non-pauseable priorities go to the second queue.
 +	 */
 +	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
 +		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
 +					 pg_help_data->data[0].pg_priority) ||
 +		    IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
 +					 pg_help_data->data[1].pg_priority)) {
 +			/* If one PG contains both pauseable and
 +			 * non-pauseable priorities then ETS is disabled.
 +			 */
 +			bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
 +					pg_pri_orginal_spread, ets);
 +			bp->dcbx_port_params.ets.enabled = false;
 +			return;
 +		}
 +
 +		/* Pauseable */
 +		cos_data->data[0].pausable = true;
 +		/* Non pauseable. */
 +		cos_data->data[1].pausable = false;
 +		if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
 +				pg_help_data->data[0].pg_priority)) {
 +			/* 0 is pauseable */
 +			cos_data->data[0].pri_join_mask =
 +				pg_help_data->data[0].pg_priority;
 +			pg[0] = pg_help_data->data[0].pg;
 +			cos_data->data[1].pri_join_mask =
 +				pg_help_data->data[1].pg_priority;
 +			pg[1] = pg_help_data->data[1].pg;
 +		} else { /* 1 is pauseable */
 +			cos_data->data[0].pri_join_mask =
 +				pg_help_data->data[1].pg_priority;
 +			pg[0] = pg_help_data->data[1].pg;
 +			cos_data->data[1].pri_join_mask =
 +				pg_help_data->data[0].pg_priority;
 +			pg[1] = pg_help_data->data[0].pg;
 +		}
 +	} else {
 +		/* If there are only pauseable priorities or
 +		 * only non-pauseable, each PG goes to a queue.
 +		 */
 +		cos_data->data[0].pausable = cos_data->data[1].pausable =
 +			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
 +		cos_data->data[0].pri_join_mask =
 +			pg_help_data->data[0].pg_priority;
 +		pg[0] = pg_help_data->data[0].pg;
 +		cos_data->data[1].pri_join_mask =
 +			pg_help_data->data[1].pg_priority;
 +		pg[1] = pg_help_data->data[1].pg;
 +	}
 +
 +	/* There can be only one strict pg */
 +	for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
 +		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
 +			cos_data->data[i].cos_bw =
 +				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
 +		else
 +			cos_data->data[i].strict =
 +						BNX2X_DCBX_STRICT_COS_HIGHEST;
 +	}
 +}
 +
 +static int bnx2x_dcbx_join_pgs(
 +			      struct bnx2x            *bp,
 +			      struct dcbx_ets_feature *ets,
 +			      struct pg_help_data     *pg_help_data,
 +			      u8                      required_num_of_pg)
 +{
 +	u8 entry_joined    = pg_help_data->num_of_pg - 1;
 +	u8 entry_removed   = entry_joined + 1;
 +	u8 pg_joined       = 0;
 +
 +	if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
 +						<= pg_help_data->num_of_pg) {
 +
 +		BNX2X_ERR("required_num_of_pg can't be zero or num_of_pg exceeds the table size\n");
 +		return -EINVAL;
 +	}
 +
 +	while (required_num_of_pg < pg_help_data->num_of_pg) {
 +		entry_joined = pg_help_data->num_of_pg - 2;
 +		entry_removed = entry_joined + 1;
 +		/* protect index */
 +		entry_removed %= ARRAY_SIZE(pg_help_data->data);
 +
 +		pg_help_data->data[entry_joined].pg_priority |=
 +			pg_help_data->data[entry_removed].pg_priority;
 +
 +		pg_help_data->data[entry_joined].num_of_dif_pri +=
 +			pg_help_data->data[entry_removed].num_of_dif_pri;
 +
 +		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
 +		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
 +			/* The joined entry follows strict priority rules */
 +			pg_help_data->data[entry_joined].pg =
 +							DCBX_STRICT_PRI_PG;
 +		else {
 +		else {
 +			/* Entries can be joined - join their BW */
 +			pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
 +					pg_help_data->data[entry_joined].pg) +
 +				    DCBX_PG_BW_GET(ets->pg_bw_tbl,
 +					pg_help_data->data[entry_removed].pg);
 +
 +			DCBX_PG_BW_SET(ets->pg_bw_tbl,
 +				pg_help_data->data[entry_joined].pg, pg_joined);
 +		}
 +		/* Joined the entries */
 +		pg_help_data->num_of_pg--;
 +	}
 +
 +	return 0;
 +}
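 +
 +/* Join example (assumed inputs): with three PGs of BW 50/30/20 and
 + * required_num_of_pg = 2, a single pass merges the last two entries
 + * into one PG whose BW becomes 30 + 20 = 50 (or DCBX_STRICT_PRI_PG
 + * if either member was strict), leaving the two PGs requested.
 + */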
 +
 +static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 +			      struct bnx2x		*bp,
 +			      struct pg_help_data	*pg_help_data,
 +			      struct dcbx_ets_feature	*ets,
 +			      struct cos_help_data	*cos_data,
 +			      u32			*pg_pri_orginal_spread,
 +			      u32			pri_join_mask,
 +			      u8			num_of_dif_pri)
 +{
 +	u8 i = 0;
 +	u32 pri_tested = 0;
 +	u8 entry = 0;
 +	u8 pg_entry = 0;
 +	bool b_found_strict = false;
 +	u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
 +
 +	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
 +	/* If there are both pauseable and non-pauseable priorities,
 +	 * the pauseable priorities go to the first queue and the
 +	 * non-pauseable priorities go to the second queue.
 +	 */
 +	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
 +		bnx2x_dcbx_separate_pauseable_from_non(bp,
 +				cos_data, pg_pri_orginal_spread, ets);
 +	else {
 +		/* If two BW-limited PG-s were combined to one queue,
 +		 * the BW is their sum.
 +		 *
 +		 * If there are only pauseable priorities or only non-pauseable,
 +		 * and there are both BW-limited and non-BW-limited PG-s,
 +		 * the BW-limited PG/s go to one queue and the non-BW-limited
 +		 * PG/s go to the second queue.
 +		 *
 +		 * If there are only pauseable priorities or only non-pauseable
 +		 * and all are BW limited, then two priorities go to the first
 +		 * queue and one priority goes to the second queue.
 +		 *
 +		 * We join these two cases:
 +		 * if one is BW limited it will go to the second queue,
 +		 * otherwise the last priority will get it.
 +		 */
 +
 +		cos_data->data[0].pausable = cos_data->data[1].pausable =
 +			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
 +
 +		for (i = 0 ; i < num_of_pri; i++) {
 +			pri_tested = 1 << bp->dcbx_port_params.
 +				app.traffic_type_priority[i];
 +			pg_entry = (u8)pg_pri_orginal_spread[bp->
 +				dcbx_port_params.app.traffic_type_priority[i]];
 +
 +			if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
 +				entry = 0;
 +
 +				if (i == (num_of_pri-1) &&
 +				    false == b_found_strict)
 +					/* the last entry is handled separately:
 +					 * if no priority is strict, the last
 +					 * entry goes to the last queue. */
 +					entry = 1;
 +				cos_data->data[entry].pri_join_mask |=
 +								pri_tested;
 +				bnx2x_dcbx_add_to_cos_bw(bp,
 +					&cos_data->data[entry],
 +					DCBX_PG_BW_GET(ets->pg_bw_tbl,
 +						       pg_entry));
 +			} else {
 +				b_found_strict = true;
 +				cos_data->data[1].pri_join_mask |= pri_tested;
 +				/* If we join a group and one is strict
 +				 * then the group is strict and the bw is
 +				 * ignored */
 +				cos_data->data[1].strict =
 +					BNX2X_DCBX_STRICT_COS_HIGHEST;
 +			}
 +		}
 +	}
 +}
 +
 +
 +static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
 +				       struct pg_help_data *help_data,
 +				       struct dcbx_ets_feature *ets,
 +				       struct cos_help_data *cos_data,
 +				       u32 *pg_pri_orginal_spread,
 +				       u32 pri_join_mask,
 +				       u8 num_of_dif_pri)
 +{
 +
 +	/* default E2 settings */
 +	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
 +
 +	switch (help_data->num_of_pg) {
 +	case 1:
 +		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
 +					       bp,
 +					       help_data,
 +					       cos_data,
 +					       pri_join_mask,
 +					       num_of_dif_pri);
 +		break;
 +	case 2:
 +		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
 +					    bp,
 +					    help_data,
 +					    ets,
 +					    cos_data,
 +					    pg_pri_orginal_spread,
 +					    pri_join_mask,
 +					    num_of_dif_pri);
 +		break;
 +
 +	case 3:
 +		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 +					      bp,
 +					      help_data,
 +					      ets,
 +					      cos_data,
 +					      pg_pri_orginal_spread,
 +					      pri_join_mask,
 +					      num_of_dif_pri);
 +		break;
 +	default:
 +		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
 +		bnx2x_dcbx_ets_disabled_entry_data(bp,
 +						   cos_data, pri_join_mask);
 +	}
 +}
 +
 +static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
 +					struct cos_help_data *cos_data,
 +					u8 entry,
 +					u8 num_spread_of_entries,
 +					u8 strict_app_pris)
 +{
 +	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
 +	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
 +	u8 app_pri_bit = 0;
 +
 +	while (num_spread_of_entries && num_of_app_pri > 0) {
 +		app_pri_bit = 1 << (num_of_app_pri - 1);
 +		if (app_pri_bit & strict_app_pris) {
 +			struct cos_entry_help_data *data = &cos_data->
 +								data[entry];
 +			num_spread_of_entries--;
 +			if (num_spread_of_entries == 0) {
 +				/* last entry needed - put all the remaining
 +				 * priorities in it */
 +				data->cos_bw = DCBX_INVALID_COS_BW;
 +				data->strict = strict_pri;
 +				data->pri_join_mask = strict_app_pris;
 +				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
 +							data->pri_join_mask);
 +			} else {
 +				strict_app_pris &= ~app_pri_bit;
 +
 +				data->cos_bw = DCBX_INVALID_COS_BW;
 +				data->strict = strict_pri;
 +				data->pri_join_mask = app_pri_bit;
 +				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
 +							data->pri_join_mask);
 +			}
 +
 +			strict_pri =
 +			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
 +			entry++;
 +		}
 +
 +		num_of_app_pri--;
 +	}
 +
 +	if (num_spread_of_entries)
 +		return -EINVAL;
 +
 +	return 0;
 +}
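 +
 +/* Spreading sketch (assumed inputs): with num_spread_of_entries = 2
 + * and strict_app_pris = 0xc0 (priorities 6 and 7), the first entry
 + * takes priority 7 alone at BNX2X_DCBX_STRICT_COS_HIGHEST and the
 + * second absorbs the remaining mask 0x40 at the next lower strict
 + * level.
 + */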
 +
 +static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
 +					 struct cos_help_data *cos_data,
 +					 u8 entry,
 +					 u8 num_spread_of_entries,
 +					 u8 strict_app_pris)
 +{
 +
 +	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
 +					 num_spread_of_entries,
 +					 strict_app_pris)) {
 +		struct cos_entry_help_data *data = &cos_data->
 +						    data[entry];
 +		/* Fill the entry as a single strict priority COS */
 +		data->cos_bw = DCBX_INVALID_COS_BW;
 +		data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
 +		data->pri_join_mask = strict_app_pris;
 +		data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
 +				 data->pri_join_mask);
 +		return 1;
 +	}
 +
 +	return num_spread_of_entries;
 +}
 +
 +static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
 +					   struct pg_help_data *help_data,
 +					   struct dcbx_ets_feature *ets,
 +					   struct cos_help_data *cos_data,
 +					   u32 pri_join_mask)
 +{
 +	u8 need_num_of_entries = 0;
 +	u8 i = 0;
 +	u8 entry = 0;
 +
 +	/*
 +	 * if the number of requested PG-s in CEE is greater than 3
 +	 * then the results are undefined, since this is a violation
 +	 * of the standard.
 +	 */
 +	if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
 +		if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
 +					DCBX_COS_MAX_NUM_E3B0)) {
 +			BNX2X_ERR("Unable to reduce the number of PGs - "
 +				  "ETS will be disabled\n");
 +			bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
 +							   pri_join_mask);
 +			return;
 +		}
 +	}
 +
 +	for (i = 0 ; i < help_data->num_of_pg; i++) {
 +		struct pg_entry_help_data *pg =  &help_data->data[i];
 +		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
 +			struct cos_entry_help_data *data = &cos_data->
 +							    data[entry];
 +			/* Fill BW entry */
 +			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
 +			data->strict = BNX2X_DCBX_STRICT_INVALID;
 +			data->pri_join_mask = pg->pg_priority;
 +			data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
 +						data->pri_join_mask);
 +
 +			entry++;
 +		} else {
 +			need_num_of_entries =  min_t(u8,
 +				(u8)pg->num_of_dif_pri,
 +				(u8)DCBX_COS_MAX_NUM_E3B0 -
 +						 help_data->num_of_pg + 1);
 +			/*
 +			 * If there are still VOQ-s which have no associated PG,
 +			 * then associate these VOQ-s to PG15. These PG-s will
 +			 * be used for SP between priorities on PG15.
 +			 */
 +			entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
 +				entry, need_num_of_entries, pg->pg_priority);
 +		}
 +	}
 +
 +	/* 'entry' now holds the number of COSes used */
 +	cos_data->num_of_cos = entry;
 +}
 +
 +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
 +				       struct pg_help_data *help_data,
 +				       struct dcbx_ets_feature *ets,
 +				       u32 *pg_pri_orginal_spread)
 +{
 +	struct cos_help_data         cos_data;
 +	u8                    i                           = 0;
 +	u32                   pri_join_mask               = 0;
 +	u8                    num_of_dif_pri              = 0;
 +
 +	memset(&cos_data, 0, sizeof(cos_data));
 +
 +	/* Validate the pg value */
 +	for (i = 0; i < help_data->num_of_pg ; i++) {
 +		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
 +		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
 +			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
 +				  help_data->data[i].pg);
 +		pri_join_mask   |=  help_data->data[i].pg_priority;
 +		num_of_dif_pri  += help_data->data[i].num_of_dif_pri;
 +	}
 +
 +	/* defaults */
 +	cos_data.num_of_cos = 1;
 +	for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
 +		cos_data.data[i].pri_join_mask = 0;
 +		cos_data.data[i].pausable = false;
 +		cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
 +		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
 +	}
 +
 +	if (CHIP_IS_E3B0(bp))
 +		bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
 +					       &cos_data, pri_join_mask);
 +	else /* E2 + E3A0 */
 +		bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
 +							  help_data, ets,
 +							  &cos_data,
 +							  pg_pri_orginal_spread,
 +							  pri_join_mask,
 +							  num_of_dif_pri);
 +
 +	for (i = 0; i < cos_data.num_of_cos; i++) {
 +		struct bnx2x_dcbx_cos_params *p =
 +			&bp->dcbx_port_params.ets.cos_params[i];
 +
 +		p->strict = cos_data.data[i].strict;
 +		p->bw_tbl = cos_data.data[i].cos_bw;
 +		p->pri_bitmask = cos_data.data[i].pri_join_mask;
 +		p->pauseable = cos_data.data[i].pausable;
 +
 +		/* sanity */
 +		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
 +		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
 +			if (p->pri_bitmask == 0)
 +				BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
 +
 +			if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
 +
 +				if (p->pauseable &&
 +				    DCBX_PFC_PRI_GET_NON_PAUSE(bp,
 +						p->pri_bitmask) != 0)
 +					BNX2X_ERR("Inconsistent config for "
 +						  "pausable COS %d\n", i);
 +
 +				if (!p->pauseable &&
 +				    DCBX_PFC_PRI_GET_PAUSE(bp,
 +						p->pri_bitmask) != 0)
 +					BNX2X_ERR("Inconsistent config for "
 +						  "nonpausable COS %d\n", i);
 +			}
 +		}
 +
 +		if (p->pauseable)
 +			DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
 +				  i, cos_data.data[i].pri_join_mask);
 +		else
 +			DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
 +					  "0x%x\n",
 +				  i, cos_data.data[i].pri_join_mask);
 +	}
 +
 +	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
 +}
 +
 +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
 +				u32 *set_configuration_ets_pg,
 +				u32 *pri_pg_tbl)
 +{
 +	int i;
 +
 +	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
 +		set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
 +
 +		DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
 +		   i, set_configuration_ets_pg[i]);
 +	}
 +}
 +
 +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 +				 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
 +{
 +	u16 pri_bit = 0;
 +	u8 cos = 0, pri = 0;
 +	struct priority_cos *tt2cos;
 +	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +
 +	memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
 +
 +	/* to disable DCB - the structure must be zeroed */
 +	if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
 +		return;
 +
 +	/* shortcut */
 +	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
 +
 +	/* FW version should be incremented on each update */
 +	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
 +	pfc_fw_cfg->dcb_enabled = 1;
 +
 +	/* Fill priority parameters */
 +	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
 +		tt2cos[pri].priority = ttp[pri];
 +		pri_bit = 1 << tt2cos[pri].priority;
 +
 +		/* Fill COS parameters based on the calculated COS to
 +		 * keep this general for future use */
 +		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
 +			if (bp->dcbx_port_params.ets.cos_params[cos].
 +						pri_bitmask & pri_bit)
 +					tt2cos[pri].cos = cos;
 +	}
 +
 +	/* we never want the FW to add a 0 vlan tag */
 +	pfc_fw_cfg->dont_add_pri_0_en = 1;
 +
 +	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
 +}
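 +
 +/* Fill example (illustrative values): if traffic type 0 maps to
 + * priority 3 and only COS 1 has bit 3 set in its pri_bitmask, the
 + * loops above yield tt2cos[0] = { .priority = 3, .cos = 1 }.
 + */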
 +
 +void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 +{
 +	/* if we need to synchronize the DCBX result from the previous PMF,
 +	 * read it from shmem and update bp accordingly
 +	 */
 +	if (SHMEM2_HAS(bp, drv_flags) &&
 +	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
 +		/* Read neg results if dcbx is in the FW */
 +		if (bnx2x_dcbx_read_shmem_neg_results(bp))
 +			return;
 +
 +		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +					  bp->dcbx_error);
 +		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +					 bp->dcbx_error);
 +	}
 +}
 +
 +/* DCB netlink */
 +#ifdef BCM_DCBNL
 +
 +#define BNX2X_DCBX_CAPS		(DCB_CAP_DCBX_LLD_MANAGED | \
 +				DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
 +
 +static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
 +{
 +	/* validate dcbnl call that may change HW state:
 +	 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
 +	 */
 +	return bp->dcb_state && bp->dcbx_mode_uset;
 +}
 +
 +static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
 +	return bp->dcb_state;
 +}
 +
 +static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
 +
 +	bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
 +	return 0;
 +}
 +
 +static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 +					 u8 *perm_addr)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
 +
 +	/* first the HW mac address */
 +	memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
 +
 +#ifdef BCM_CNIC
 +	/* second SAN address */
 +	memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
 +#endif
 +}
 +
 +static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
 +					u8 prio_type, u8 pgid, u8 bw_pct,
 +					u8 up_map)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +
 +	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
 +	if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
 +		return;
 +
 +	/*
 +	 * bw_pct ignored -	bandwidth percentage division between user
 +	 *			priorities within the same group is not
 +	 *			standard and hence not supported
 +	 *
 +	 * prio_type ignored -	priority levels within the same group are not
 +	 *			standard and hence are not supported. According
 +	 *			to the standard, pgid 15 is dedicated to strict
 +	 *			priority traffic (on the port level).
 +	 *
 +	 * up_map ignored
 +	 */
 +
 +	bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
 +	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
 +}
 +
 +static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
 +					 int pgid, u8 bw_pct)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
 +
 +	if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
 +		return;
 +
 +	bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
 +	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
 +}
 +
 +static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
 +					u8 prio_type, u8 pgid, u8 bw_pct,
 +					u8 up_map)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
 +}
 +
 +static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
 +					 int pgid, u8 bw_pct)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
 +}
 +
 +static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
 +					u8 *prio_type, u8 *pgid, u8 *bw_pct,
 +					u8 *up_map)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
 +
 +	/*
 +	 * bw_pct ignored -	bandwidth percentage division between user
 +	 *			priorities within the same group is not
 +	 *			standard and hence not supported
 +	 *
 +	 * prio_type ignored -	priority levels within the same group are not
 +	 *			standard and hence are not supported. According
 +	 *			to the standard, pgid 15 is dedicated to strict
 +	 *			priority traffic (on the port level).
 +	 *
 +	 * up_map ignored
 +	 */
 +	*up_map = *bw_pct = *prio_type = *pgid = 0;
 +
 +	if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
 +		return;
 +
 +	*pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
 +}
 +
 +static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
 +					 int pgid, u8 *bw_pct)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
 +
 +	*bw_pct = 0;
 +
 +	if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
 +		return;
 +
 +	*bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
 +}
 +
 +static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
 +					u8 *prio_type, u8 *pgid, u8 *bw_pct,
 +					u8 *up_map)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
 +
 +	*prio_type = *pgid = *bw_pct = *up_map = 0;
 +}
 +
 +static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
 +					 int pgid, u8 *bw_pct)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
 +
 +	*bw_pct = 0;
 +}
 +
 +static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
 +				    u8 setting)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
 +
 +	if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
 +		return;
 +
 +	bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
 +
 +	if (setting)
 +		bp->dcbx_config_params.admin_pfc_tx_enable = 1;
 +}
 +
 +static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
 +				    u8 *setting)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
 +
 +	*setting = 0;
 +
 +	if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
 +		return;
 +
 +	*setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
 +}
 +
 +static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	int rc = 0;
 +
 +	DP(NETIF_MSG_LINK, "SET-ALL\n");
 +
 +	if (!bnx2x_dcbnl_set_valid(bp))
 +		return 1;
 +
 +	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +		netdev_err(bp->dev, "Handling parity error recovery. "
 +				"Try again later\n");
 +		return 1;
 +	}
 +	if (netif_running(bp->dev)) {
 +		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
 +	}
 +	DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
 +	if (rc)
 +		return 1;
 +
 +	return 0;
 +}
 +
 +static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u8 rval = 0;
 +
 +	if (bp->dcb_state) {
 +		switch (capid) {
 +		case DCB_CAP_ATTR_PG:
 +			*cap = true;
 +			break;
 +		case DCB_CAP_ATTR_PFC:
 +			*cap = true;
 +			break;
 +		case DCB_CAP_ATTR_UP2TC:
 +			*cap = false;
 +			break;
 +		case DCB_CAP_ATTR_PG_TCS:
 +			*cap = 0x80;	/* 8 priorities for PGs */
 +			break;
 +		case DCB_CAP_ATTR_PFC_TCS:
 +			*cap = 0x80;	/* 8 priorities for PFC */
 +			break;
 +		case DCB_CAP_ATTR_GSP:
 +			*cap = true;
 +			break;
 +		case DCB_CAP_ATTR_BCN:
 +			*cap = false;
 +			break;
 +		case DCB_CAP_ATTR_DCBX:
 +			*cap = BNX2X_DCBX_CAPS;
++			break;
 +		default:
 +			rval = -EINVAL;
 +			break;
 +		}
 +	} else
 +		rval = -EINVAL;
 +
 +	DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
 +	return rval;
 +}
 +
 +static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u8 rval = 0;
 +
 +	DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
 +
 +	if (bp->dcb_state) {
 +		switch (tcid) {
 +		case DCB_NUMTCS_ATTR_PG:
 +			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
 +						  DCBX_COS_MAX_NUM_E2;
 +			break;
 +		case DCB_NUMTCS_ATTR_PFC:
 +			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
 +						  DCBX_COS_MAX_NUM_E2;
 +			break;
 +		default:
 +			rval = -EINVAL;
 +			break;
 +		}
 +	} else
 +		rval = -EINVAL;
 +
 +	return rval;
 +}
 +
 +static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
 +	return -EINVAL;
 +}
 +
 +static u8  bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
 +
 +	if (!bp->dcb_state)
 +		return 0;
 +
 +	return bp->dcbx_local_feat.pfc.enabled;
 +}
 +
 +static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
 +
 +	if (!bnx2x_dcbnl_set_valid(bp))
 +		return;
 +
 +	bp->dcbx_config_params.admin_pfc_tx_enable =
 +	bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
 +}
 +
 +static void bnx2x_admin_app_set_ent(
 +	struct bnx2x_admin_priority_app_table *app_ent,
 +	u8 idtype, u16 idval, u8 up)
 +{
 +	app_ent->valid = 1;
 +
 +	switch (idtype) {
 +	case DCB_APP_IDTYPE_ETHTYPE:
 +		app_ent->traffic_type = TRAFFIC_TYPE_ETH;
 +		break;
 +	case DCB_APP_IDTYPE_PORTNUM:
 +		app_ent->traffic_type = TRAFFIC_TYPE_PORT;
 +		break;
 +	default:
 +		break; /* never gets here */
 +	}
 +	app_ent->app_id = idval;
 +	app_ent->priority = up;
 +}
 +
 +static bool bnx2x_admin_app_is_equal(
 +	struct bnx2x_admin_priority_app_table *app_ent,
 +	u8 idtype, u16 idval)
 +{
 +	if (!app_ent->valid)
 +		return false;
 +
 +	switch (idtype) {
 +	case DCB_APP_IDTYPE_ETHTYPE:
 +		if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
 +			return false;
 +		break;
 +	case DCB_APP_IDTYPE_PORTNUM:
 +		if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
 +			return false;
 +		break;
 +	default:
 +		return false;
 +	}
 +	if (app_ent->app_id != idval)
 +		return false;
 +
 +	return true;
 +}
 +
 +static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
 +{
 +	int i, ff;
 +
 +	/* iterate over the app entries looking for idtype and idval */
 +	for (i = 0, ff = -1; i < 4; i++) {
 +		struct bnx2x_admin_priority_app_table *app_ent =
 +			&bp->dcbx_config_params.admin_priority_app_table[i];
 +		if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
 +			break;
 +
 +		if (ff < 0 && !app_ent->valid)
 +			ff = i;
 +	}
 +	if (i < 4)
 +		/* if found overwrite up */
 +		bp->dcbx_config_params.
 +			admin_priority_app_table[i].priority = up;
 +	else if (ff >= 0)
 +		/* not found use first-free */
 +		bnx2x_admin_app_set_ent(
 +			&bp->dcbx_config_params.admin_priority_app_table[ff],
 +			idtype, idval, up);
 +	else
 +		/* app table is full */
 +		return -EBUSY;
 +
 +	/* up configured, if not 0 make sure feature is enabled */
 +	if (up)
 +		bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
 +
 +	return 0;
 +}
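 +
 +/* Lookup sketch (assumed table state): with slots 0 and 1 already
 + * valid and a new app id that matches neither, the scan records
 + * slot 2 as first-free and writes the new entry there; once all
 + * four slots are valid, a further distinct app returns -EBUSY.
 + */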
 +
 +static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
 +				 u16 idval, u8 up)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +
 +	DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
 +	   idtype, idval, up);
 +
 +	if (!bnx2x_dcbnl_set_valid(bp))
 +		return -EINVAL;
 +
 +	/* verify idtype */
 +	switch (idtype) {
 +	case DCB_APP_IDTYPE_ETHTYPE:
 +	case DCB_APP_IDTYPE_PORTNUM:
 +		break;
 +	default:
 +		return -EINVAL;
 +	}
 +	return bnx2x_set_admin_app_up(bp, idtype, idval, up);
 +}
 +
 +static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u8 state;
 +
 +	state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
 +
 +	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
 +		state |= DCB_CAP_DCBX_STATIC;
 +
 +	return state;
 +}
 +
 +static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	DP(NETIF_MSG_LINK, "state = %02x\n", state);
 +
 +	/* set dcbx mode */
 +
 +	if ((state & BNX2X_DCBX_CAPS) != state) {
 +		BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
 +			  "capabilities\n", state);
 +		return 1;
 +	}
 +
 +	if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
 +		BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
 +		return 1;
 +	}
 +
 +	if (state & DCB_CAP_DCBX_STATIC)
 +		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
 +	else
 +		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
 +
 +	bp->dcbx_mode_uset = true;
 +	return 0;
 +}
 +
 +static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
 +				  u8 *flags)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u8 rval = 0;
 +
 +	DP(NETIF_MSG_LINK, "featid %d\n", featid);
 +
 +	if (bp->dcb_state) {
 +		*flags = 0;
 +		switch (featid) {
 +		case DCB_FEATCFG_ATTR_PG:
 +			if (bp->dcbx_local_feat.ets.enabled)
 +				*flags |= DCB_FEATCFG_ENABLE;
 +			if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
 +				*flags |= DCB_FEATCFG_ERROR;
 +			break;
 +		case DCB_FEATCFG_ATTR_PFC:
 +			if (bp->dcbx_local_feat.pfc.enabled)
 +				*flags |= DCB_FEATCFG_ENABLE;
 +			if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
 +			    DCBX_LOCAL_PFC_MISMATCH))
 +				*flags |= DCB_FEATCFG_ERROR;
 +			break;
 +		case DCB_FEATCFG_ATTR_APP:
 +			if (bp->dcbx_local_feat.app.enabled)
 +				*flags |= DCB_FEATCFG_ENABLE;
 +			if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
 +			    DCBX_LOCAL_APP_MISMATCH))
 +				*flags |= DCB_FEATCFG_ERROR;
 +			break;
 +		default:
 +			rval = -EINVAL;
 +			break;
 +		}
 +	} else
 +		rval = -EINVAL;
 +
 +	return rval;
 +}
 +
 +static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
 +				  u8 flags)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u8 rval = 0;
 +
 +	DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
 +
 +	/* ignore the 'advertise' flag */
 +	if (bnx2x_dcbnl_set_valid(bp)) {
 +		switch (featid) {
 +		case DCB_FEATCFG_ATTR_PG:
 +			bp->dcbx_config_params.admin_ets_enable =
 +				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
 +			bp->dcbx_config_params.admin_ets_willing =
 +				flags & DCB_FEATCFG_WILLING ? 1 : 0;
 +			break;
 +		case DCB_FEATCFG_ATTR_PFC:
 +			bp->dcbx_config_params.admin_pfc_enable =
 +				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
 +			bp->dcbx_config_params.admin_pfc_willing =
 +				flags & DCB_FEATCFG_WILLING ? 1 : 0;
 +			break;
 +		case DCB_FEATCFG_ATTR_APP:
 +			/* ignore enable, always enabled */
 +			bp->dcbx_config_params.admin_app_priority_willing =
 +				flags & DCB_FEATCFG_WILLING ? 1 : 0;
 +			break;
 +		default:
 +			rval = -EINVAL;
 +			break;
 +		}
 +	} else
 +		rval = -EINVAL;
 +
 +	return rval;
 +}
 +
 +static int bnx2x_peer_appinfo(struct net_device *netdev,
 +			      struct dcb_peer_app_info *info, u16 *app_count)
 +{
 +	int i;
 +	struct bnx2x *bp = netdev_priv(netdev);
 +
 +	DP(NETIF_MSG_LINK, "APP-INFO\n");
 +
 +	info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
 +	info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
 +	*app_count = 0;
 +
 +	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
 +		if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
 +		    DCBX_APP_ENTRY_VALID)
 +			(*app_count)++;
 +	return 0;
 +}
 +
 +static int bnx2x_peer_apptable(struct net_device *netdev,
 +			       struct dcb_app *table)
 +{
 +	int i, j;
 +	struct bnx2x *bp = netdev_priv(netdev);
 +
 +	DP(NETIF_MSG_LINK, "APP-TABLE\n");
 +
 +	for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
 +		struct dcbx_app_priority_entry *ent =
 +			&bp->dcbx_remote_feat.app.app_pri_tbl[i];
 +
 +		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
 +			table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
 +			table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
 +			table[j++].protocol = ent->app_id;
 +		}
 +	}
 +	return 0;
 +}
 +
 +static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
 +{
 +	int i;
 +	struct bnx2x *bp = netdev_priv(netdev);
 +
 +	pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
 +
 +	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
 +		pg->pg_bw[i] =
 +			DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
 +		pg->prio_pg[i] =
 +			DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
 +	}
 +	return 0;
 +}
 +
 +static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
 +				 struct cee_pfc *pfc)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
 +	pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
 +	return 0;
 +}
 +
 +const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
 +	.getstate		= bnx2x_dcbnl_get_state,
 +	.setstate		= bnx2x_dcbnl_set_state,
 +	.getpermhwaddr		= bnx2x_dcbnl_get_perm_hw_addr,
 +	.setpgtccfgtx		= bnx2x_dcbnl_set_pg_tccfg_tx,
 +	.setpgbwgcfgtx		= bnx2x_dcbnl_set_pg_bwgcfg_tx,
 +	.setpgtccfgrx		= bnx2x_dcbnl_set_pg_tccfg_rx,
 +	.setpgbwgcfgrx		= bnx2x_dcbnl_set_pg_bwgcfg_rx,
 +	.getpgtccfgtx		= bnx2x_dcbnl_get_pg_tccfg_tx,
 +	.getpgbwgcfgtx		= bnx2x_dcbnl_get_pg_bwgcfg_tx,
 +	.getpgtccfgrx		= bnx2x_dcbnl_get_pg_tccfg_rx,
 +	.getpgbwgcfgrx		= bnx2x_dcbnl_get_pg_bwgcfg_rx,
 +	.setpfccfg		= bnx2x_dcbnl_set_pfc_cfg,
 +	.getpfccfg		= bnx2x_dcbnl_get_pfc_cfg,
 +	.setall			= bnx2x_dcbnl_set_all,
 +	.getcap			= bnx2x_dcbnl_get_cap,
 +	.getnumtcs		= bnx2x_dcbnl_get_numtcs,
 +	.setnumtcs		= bnx2x_dcbnl_set_numtcs,
 +	.getpfcstate		= bnx2x_dcbnl_get_pfc_state,
 +	.setpfcstate		= bnx2x_dcbnl_set_pfc_state,
 +	.setapp			= bnx2x_dcbnl_set_app_up,
 +	.getdcbx		= bnx2x_dcbnl_get_dcbx,
 +	.setdcbx		= bnx2x_dcbnl_set_dcbx,
 +	.getfeatcfg		= bnx2x_dcbnl_get_featcfg,
 +	.setfeatcfg		= bnx2x_dcbnl_set_featcfg,
 +	.peer_getappinfo	= bnx2x_peer_appinfo,
 +	.peer_getapptable	= bnx2x_peer_apptable,
 +	.cee_peer_getpg		= bnx2x_cee_peer_getpg,
 +	.cee_peer_getpfc	= bnx2x_cee_peer_getpfc,
 +};
 +
 +#endif /* BCM_DCBNL */
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28bde16,0000000..6486ab8
mode 100644,000000..100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@@ -1,11607 -1,0 +1,11617 @@@
 +/* bnx2x_main.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong at broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/kernel.h>
 +#include <linux/device.h>  /* for dev_info() */
 +#include <linux/timer.h>
 +#include <linux/errno.h>
 +#include <linux/ioport.h>
 +#include <linux/slab.h>
 +#include <linux/interrupt.h>
 +#include <linux/pci.h>
 +#include <linux/init.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/bitops.h>
 +#include <linux/irq.h>
 +#include <linux/delay.h>
 +#include <asm/byteorder.h>
 +#include <linux/time.h>
 +#include <linux/ethtool.h>
 +#include <linux/mii.h>
 +#include <linux/if.h>
 +#include <linux/if_vlan.h>
 +#include <net/ip.h>
 +#include <net/ipv6.h>
 +#include <net/tcp.h>
 +#include <net/checksum.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/workqueue.h>
 +#include <linux/crc32.h>
 +#include <linux/crc32c.h>
 +#include <linux/prefetch.h>
 +#include <linux/zlib.h>
 +#include <linux/io.h>
 +#include <linux/stringify.h>
 +#include <linux/vmalloc.h>
 +
 +#include "bnx2x.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_init_ops.h"
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_dcb.h"
 +#include "bnx2x_sp.h"
 +
 +#include <linux/firmware.h>
 +#include "bnx2x_fw_file_hdr.h"
 +/* FW files */
 +#define FW_FILE_VERSION					\
 +	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
 +	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
 +	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
 +	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
 +#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
 +#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 +#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
 +
 +/* Time in jiffies before concluding the transmitter is hung */
 +#define TX_TIMEOUT		(5*HZ)
 +
 +static char version[] __devinitdata =
 +	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
 +	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 +
 +MODULE_AUTHOR("Eliezer Tamir");
 +MODULE_DESCRIPTION("Broadcom NetXtreme II "
 +		   "BCM57710/57711/57711E/"
 +		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
 +		   "57840/57840_MF Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_MODULE_VERSION);
 +MODULE_FIRMWARE(FW_FILE_NAME_E1);
 +MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 +MODULE_FIRMWARE(FW_FILE_NAME_E2);
 +
 +static int multi_mode = 1;
 +module_param(multi_mode, int, 0);
 +MODULE_PARM_DESC(multi_mode, " Multi queue mode "
 +			     "(0 Disable; 1 Enable (default))");
 +
 +int num_queues;
 +module_param(num_queues, int, 0);
 +MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
 +				" (default is the number of CPUs)");
 +
 +static int disable_tpa;
 +module_param(disable_tpa, int, 0);
 +MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 +
 +#define INT_MODE_INTx			1
 +#define INT_MODE_MSI			2
 +static int int_mode;
 +module_param(int_mode, int, 0);
 +MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 +				"(1 INT#x; 2 MSI)");
 +
 +static int dropless_fc;
 +module_param(dropless_fc, int, 0);
 +MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
 +
 +static int poll;
 +module_param(poll, int, 0);
 +MODULE_PARM_DESC(poll, " Use polling (for debug)");
 +
 +static int mrrs = -1;
 +module_param(mrrs, int, 0);
 +MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
 +
 +static int debug;
 +module_param(debug, int, 0);
 +MODULE_PARM_DESC(debug, " Default debug msglevel");
 +
 +
 +
 +struct workqueue_struct *bnx2x_wq;
 +
 +enum bnx2x_board_type {
 +	BCM57710 = 0,
 +	BCM57711,
 +	BCM57711E,
 +	BCM57712,
 +	BCM57712_MF,
 +	BCM57800,
 +	BCM57800_MF,
 +	BCM57810,
 +	BCM57810_MF,
 +	BCM57840,
 +	BCM57840_MF
 +};
 +
 +/* indexed by board_type, above */
 +static struct {
 +	char *name;
 +} board_info[] __devinitdata = {
 +	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
 +	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
 +	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
 +	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
 +	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
 +	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
 +	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
 +	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 +	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 +	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
 +	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
 +						"Ethernet Multi Function"}
 +};
 +
 +#ifndef PCI_DEVICE_ID_NX2_57710
 +#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57711
 +#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57711E
 +#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57712
 +#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57712_MF
 +#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57800
 +#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57800_MF
 +#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57810
 +#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57810_MF
 +#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57840
 +#define PCI_DEVICE_ID_NX2_57840		CHIP_NUM_57840
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57840_MF
 +#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
 +#endif
 +static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
 +	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 +	{ 0 }
 +};
 +
 +MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 +
 +/****************************************************************************
 +* General service functions
 +****************************************************************************/
 +
 +static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
 +				       u32 addr, dma_addr_t mapping)
 +{
 +	REG_WR(bp,  addr, U64_LO(mapping));
 +	REG_WR(bp,  addr + 4, U64_HI(mapping));
 +}
 +
 +static inline void storm_memset_spq_addr(struct bnx2x *bp,
 +					 dma_addr_t mapping, u16 abs_fid)
 +{
 +	u32 addr = XSEM_REG_FAST_MEMORY +
 +			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
 +
 +	__storm_memset_dma_mapping(bp, addr, mapping);
 +}
 +
 +static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
 +					 u16 pf_id)
 +{
 +	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
 +		pf_id);
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
 +		pf_id);
 +	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
 +		pf_id);
 +	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
 +		pf_id);
 +}
 +
 +static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
 +					u8 enable)
 +{
 +	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
 +		enable);
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
 +		enable);
 +	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
 +		enable);
 +	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
 +		enable);
 +}
 +
 +static inline void storm_memset_eq_data(struct bnx2x *bp,
 +				struct event_ring_data *eq_data,
 +				u16 pfid)
 +{
 +	size_t size = sizeof(struct event_ring_data);
 +
 +	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
 +
 +	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
 +}
 +
 +static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
 +					u16 pfid)
 +{
 +	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
 +	REG_WR16(bp, addr, eq_prod);
 +}
 +
 +/* used only at init
 + * locking is done by mcp
 + */
 +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
 +{
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +			       PCICFG_VENDOR_ID_OFFSET);
 +}
 +
 +static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 +{
 +	u32 val;
 +
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
 +	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +			       PCICFG_VENDOR_ID_OFFSET);
 +
 +	return val;
 +}
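The two helpers above implement a classic indirect-access window: the target
GRC address is latched through PCICFG_GRC_ADDRESS, the data moves through
PCICFG_GRC_DATA, and the window is parked back at PCICFG_VENDOR_ID_OFFSET so
that stray config-space reads stay harmless. A minimal usage sketch (the
0x2000 offset is a placeholder, not a real register):

	u32 val;

	bnx2x_reg_wr_ind(bp, 0x2000, 0x1);	/* write via the config window */
	val = bnx2x_reg_rd_ind(bp, 0x2000);	/* read back the same way */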
 +
 +#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
 +#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
 +#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
 +#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
 +#define DMAE_DP_DST_NONE	"dst_addr [none]"
 +
 +static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
 +			  int msglvl)
 +{
 +	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
 +
 +	switch (dmae->opcode & DMAE_COMMAND_DST) {
 +	case DMAE_CMD_DST_PCI:
 +		if (src_type == DMAE_CMD_SRC_PCI)
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
 +			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +			   dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		else
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
 +			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_lo >> 2,
 +			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +			   dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		break;
 +	case DMAE_CMD_DST_GRC:
 +		if (src_type == DMAE_CMD_SRC_PCI)
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
 +			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +			   dmae->len, dmae->dst_addr_lo >> 2,
 +			   dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		else
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src [%08x], len [%d*4], dst [%08x]\n"
 +			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_lo >> 2,
 +			   dmae->len, dmae->dst_addr_lo >> 2,
 +			   dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		break;
 +	default:
 +		if (src_type == DMAE_CMD_SRC_PCI)
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
 +			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		else
 +			DP(msglvl, "DMAE: opcode 0x%08x\n"
 +			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
 +			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +			   dmae->opcode, dmae->src_addr_lo >> 2,
 +			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +			   dmae->comp_val);
 +		break;
 +	}
 +
 +}
 +
 +/* copy command into DMAE command memory and set DMAE command go */
 +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
 +{
 +	u32 cmd_offset;
 +	int i;
 +
 +	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
 +	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
 +		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
 +
 +		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
 +		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
 +	}
 +	REG_WR(bp, dmae_reg_go_c[idx], 1);
 +}
 +
 +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
 +{
 +	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
 +			   DMAE_CMD_C_ENABLE);
 +}
 +
 +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
 +{
 +	return opcode & ~DMAE_CMD_SRC_RESET;
 +}
 +
 +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 +			     bool with_comp, u8 comp_type)
 +{
 +	u32 opcode = 0;
 +
 +	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
 +		   (dst_type << DMAE_COMMAND_DST_SHIFT));
 +
 +	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 +
 +	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
 +	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
 +		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
 +	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 +
 +#ifdef __BIG_ENDIAN
 +	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
 +#else
 +	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
 +#endif
 +	if (with_comp)
 +		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
 +	return opcode;
 +}
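Tying the opcode builder to the constants used later in this file, a
PCI-to-GRC transfer with a PCI completion write-back is composed as below;
this is effectively what bnx2x_prep_dmae_with_comp() does next:

	u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				       true /* with_comp */, DMAE_COMP_PCI);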
 +
 +static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 +				      struct dmae_command *dmae,
 +				      u8 src_type, u8 dst_type)
 +{
 +	memset(dmae, 0, sizeof(struct dmae_command));
 +
 +	/* set the opcode */
 +	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
 +					 true, DMAE_COMP_PCI);
 +
 +	/* fill in the completion parameters */
 +	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
 +	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
 +	dmae->comp_val = DMAE_COMP_VAL;
 +}
 +
 +/* issue a dmae command over the init-channel and wait for completion */
 +static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 +				      struct dmae_command *dmae)
 +{
 +	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
 +	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
 +	int rc = 0;
 +
 +	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 +	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 +	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 +
 +	/*
 +	 * Lock the dmae channel. Disable BHs to prevent a deadlock,
 +	 * since this code is called both from syscall context and from
 +	 * the ndo_set_rx_mode() flow, which may run in BH context.
 +	 */
 +	spin_lock_bh(&bp->dmae_lock);
 +
 +	/* reset completion */
 +	*wb_comp = 0;
 +
 +	/* post the command on the channel used for initializations */
 +	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 +
 +	/* wait for completion */
 +	udelay(5);
 +	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 +		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 +
 +		if (!cnt) {
 +			BNX2X_ERR("DMAE timeout!\n");
 +			rc = DMAE_TIMEOUT;
 +			goto unlock;
 +		}
 +		cnt--;
 +		udelay(50);
 +	}
 +	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
 +		BNX2X_ERR("DMAE PCI error!\n");
 +		rc = DMAE_PCI_ERROR;
 +	}
 +
 +	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 +	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 +	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 +
 +unlock:
 +	spin_unlock_bh(&bp->dmae_lock);
 +	return rc;
 +}
 +
 +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 +		      u32 len32)
 +{
 +	struct dmae_command dmae;
 +
 +	if (!bp->dmae_ready) {
 +		u32 *data = bnx2x_sp(bp, wb_data[0]);
 +
 +		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
 +		   "  using indirect\n", dst_addr, len32);
 +		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
 +		return;
 +	}
 +
 +	/* set opcode and fixed command fields */
 +	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 +
 +	/* fill in addresses and len */
 +	dmae.src_addr_lo = U64_LO(dma_addr);
 +	dmae.src_addr_hi = U64_HI(dma_addr);
 +	dmae.dst_addr_lo = dst_addr >> 2;
 +	dmae.dst_addr_hi = 0;
 +	dmae.len = len32;
 +
 +	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
 +
 +	/* issue the command and wait for completion */
 +	bnx2x_issue_dmae_with_comp(bp, &dmae);
 +}
 +
 +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 +{
 +	struct dmae_command dmae;
 +
 +	if (!bp->dmae_ready) {
 +		u32 *data = bnx2x_sp(bp, wb_data[0]);
 +		int i;
 +
 +		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
 +		   "  using indirect\n", src_addr, len32);
 +		for (i = 0; i < len32; i++)
 +			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
 +		return;
 +	}
 +
 +	/* set opcode and fixed command fields */
 +	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
 +
 +	/* fill in addresses and len */
 +	dmae.src_addr_lo = src_addr >> 2;
 +	dmae.src_addr_hi = 0;
 +	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
 +	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
 +	dmae.len = len32;
 +
 +	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
 +
 +	/* issue the command and wait for completion */
 +	bnx2x_issue_dmae_with_comp(bp, &dmae);
 +}
 +
 +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
 +				      u32 addr, u32 len)
 +{
 +	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
 +	int offset = 0;
 +
 +	while (len > dmae_wr_max) {
 +		bnx2x_write_dmae(bp, phys_addr + offset,
 +				 addr + offset, dmae_wr_max);
 +		offset += dmae_wr_max * 4;
 +		len -= dmae_wr_max;
 +	}
 +
 +	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 +}
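Note the mixed units in the loop above: len and dmae_wr_max count 32-bit
dwords while offset advances in bytes, hence the "* 4". As a worked example
(the 0x400 limit is illustrative, not the real DMAE_LEN32_WR_MAX value):

	/* len = 0x500 dwords, dmae_wr_max = 0x400 dwords:
	 * chunk 1: 0x400 dwords at byte offset 0x0
	 * chunk 2: 0x100 dwords at byte offset 0x1000 (= 0x400 * 4)
	 */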
 +
 +/* used only for slowpath so not inlined */
 +static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
 +{
 +	u32 wb_write[2];
 +
 +	wb_write[0] = val_hi;
 +	wb_write[1] = val_lo;
 +	REG_WR_DMAE(bp, reg, wb_write, 2);
 +}
 +
 +#ifdef USE_WB_RD
 +static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
 +{
 +	u32 wb_data[2];
 +
 +	REG_RD_DMAE(bp, reg, wb_data, 2);
 +
 +	return HILO_U64(wb_data[0], wb_data[1]);
 +}
 +#endif
 +
 +static int bnx2x_mc_assert(struct bnx2x *bp)
 +{
 +	char last_idx;
 +	int i, rc = 0;
 +	u32 row0, row1, row2, row3;
 +
 +	/* XSTORM */
 +	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
 +			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
 +	if (last_idx)
 +		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +	/* print the asserts */
 +	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +			      XSTORM_ASSERT_LIST_OFFSET(i));
 +		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +				  " 0x%08x 0x%08x 0x%08x\n",
 +				  i, row3, row2, row1, row0);
 +			rc++;
 +		} else {
 +			break;
 +		}
 +	}
 +
 +	/* TSTORM */
 +	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
 +			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
 +	if (last_idx)
 +		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +	/* print the asserts */
 +	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +			      TSTORM_ASSERT_LIST_OFFSET(i));
 +		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +				  " 0x%08x 0x%08x 0x%08x\n",
 +				  i, row3, row2, row1, row0);
 +			rc++;
 +		} else {
 +			break;
 +		}
 +	}
 +
 +	/* CSTORM */
 +	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
 +			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
 +	if (last_idx)
 +		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +	/* print the asserts */
 +	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +			      CSTORM_ASSERT_LIST_OFFSET(i));
 +		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +				  " 0x%08x 0x%08x 0x%08x\n",
 +				  i, row3, row2, row1, row0);
 +			rc++;
 +		} else {
 +			break;
 +		}
 +	}
 +
 +	/* USTORM */
 +	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
 +			   USTORM_ASSERT_LIST_INDEX_OFFSET);
 +	if (last_idx)
 +		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +	/* print the asserts */
 +	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +			      USTORM_ASSERT_LIST_OFFSET(i));
 +		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
 +		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
 +		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +				  " 0x%08x 0x%08x 0x%08x\n",
 +				  i, row3, row2, row1, row0);
 +			rc++;
 +		} else {
 +			break;
 +		}
 +	}
 +
 +	return rc;
 +}
 +
 +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 +{
 +	u32 addr, val;
 +	u32 mark, offset;
 +	__be32 data[9];
 +	int word;
 +	u32 trace_shmem_base;
 +	if (BP_NOMCP(bp)) {
 +		BNX2X_ERR("NO MCP - can not dump\n");
 +		return;
 +	}
 +	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
 +		(bp->common.bc_ver & 0xff0000) >> 16,
 +		(bp->common.bc_ver & 0xff00) >> 8,
 +		(bp->common.bc_ver & 0xff));
 +
 +	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
 +	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
 +		printk("%s" "MCP PC at 0x%x\n", lvl, val);
 +
 +	if (BP_PATH(bp) == 0)
 +		trace_shmem_base = bp->common.shmem_base;
 +	else
 +		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
 +	addr = trace_shmem_base - 0x0800 + 4;
 +	mark = REG_RD(bp, addr);
 +	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 +			+ ((mark + 0x3) & ~0x3) - 0x08000000;
 +	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 +
 +	printk("%s", lvl);
 +	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
 +		for (word = 0; word < 8; word++)
 +			data[word] = htonl(REG_RD(bp, offset + 4*word));
 +		data[8] = 0x0;
 +		pr_cont("%s", (char *)data);
 +	}
 +	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
 +		for (word = 0; word < 8; word++)
 +			data[word] = htonl(REG_RD(bp, offset + 4*word));
 +		data[8] = 0x0;
 +		pr_cont("%s", (char *)data);
 +	}
 +	printk("%s" "end of fw dump\n", lvl);
 +}
 +
 +static inline void bnx2x_fw_dump(struct bnx2x *bp)
 +{
 +	bnx2x_fw_dump_lvl(bp, KERN_ERR);
 +}
 +
 +void bnx2x_panic_dump(struct bnx2x *bp)
 +{
 +	int i;
 +	u16 j;
 +	struct hc_sp_status_block_data sp_sb_data;
 +	int func = BP_FUNC(bp);
 +#ifdef BNX2X_STOP_ON_ERROR
 +	u16 start = 0, end = 0;
 +	u8 cos;
 +#endif
 +
 +	bp->stats_state = STATS_STATE_DISABLED;
 +	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 +
 +	BNX2X_ERR("begin crash dump -----------------\n");
 +
 +	/* Indices */
 +	/* Common */
 +	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
 +		  "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
 +		  bp->def_idx, bp->def_att_idx, bp->attn_state,
 +		  bp->spq_prod_idx, bp->stats_counter);
 +	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
 +		  bp->def_status_blk->atten_status_block.attn_bits,
 +		  bp->def_status_blk->atten_status_block.attn_bits_ack,
 +		  bp->def_status_blk->atten_status_block.status_block_id,
 +		  bp->def_status_blk->atten_status_block.attn_bits_index);
 +	BNX2X_ERR("     def (");
 +	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
 +		pr_cont("0x%x%s",
 +			bp->def_status_blk->sp_sb.index_values[i],
 +			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
 +
 +	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
 +		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
 +			i*sizeof(u32));
 +
 +	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
 +	       sp_sb_data.igu_sb_id,
 +	       sp_sb_data.igu_seg_id,
 +	       sp_sb_data.p_func.pf_id,
 +	       sp_sb_data.p_func.vnic_id,
 +	       sp_sb_data.p_func.vf_id,
 +	       sp_sb_data.p_func.vf_valid,
 +	       sp_sb_data.state);
 +
 +
 +	for_each_eth_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +		int loop;
 +		struct hc_status_block_data_e2 sb_data_e2;
 +		struct hc_status_block_data_e1x sb_data_e1x;
 +		struct hc_status_block_sm  *hc_sm_p =
 +			CHIP_IS_E1x(bp) ?
 +			sb_data_e1x.common.state_machine :
 +			sb_data_e2.common.state_machine;
 +		struct hc_index_data *hc_index_p =
 +			CHIP_IS_E1x(bp) ?
 +			sb_data_e1x.index_data :
 +			sb_data_e2.index_data;
 +		u8 data_size, cos;
 +		u32 *sb_data_p;
 +		struct bnx2x_fp_txdata txdata;
 +
 +		/* Rx */
 +		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
 +			  "  rx_comp_prod(0x%x)"
 +			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
 +			  i, fp->rx_bd_prod, fp->rx_bd_cons,
 +			  fp->rx_comp_prod,
 +			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
 +		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
 +			  "  fp_hc_idx(0x%x)\n",
 +			  fp->rx_sge_prod, fp->last_max_sge,
 +			  le16_to_cpu(fp->fp_hc_idx));
 +
 +		/* Tx */
 +		for_each_cos_in_tx_queue(fp, cos) {
 +			txdata = fp->txdata[cos];
 +			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
 +				  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
 +				  "  *tx_cons_sb(0x%x)\n",
 +				  i, txdata.tx_pkt_prod,
 +				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
 +				  txdata.tx_bd_cons,
 +				  le16_to_cpu(*txdata.tx_cons_sb));
 +		}
 +
 +		loop = CHIP_IS_E1x(bp) ?
 +			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
 +
 +		/* host sb data */
 +
 +#ifdef BCM_CNIC
 +		if (IS_FCOE_FP(fp))
 +			continue;
 +#endif
 +		BNX2X_ERR("     run indexes (");
 +		for (j = 0; j < HC_SB_MAX_SM; j++)
 +			pr_cont("0x%x%s",
 +			       fp->sb_running_index[j],
 +			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");
 +
 +		BNX2X_ERR("     indexes (");
 +		for (j = 0; j < loop; j++)
 +			pr_cont("0x%x%s",
 +			       fp->sb_index_values[j],
 +			       (j == loop - 1) ? ")" : " ");
 +		/* fw sb data */
 +		data_size = CHIP_IS_E1x(bp) ?
 +			sizeof(struct hc_status_block_data_e1x) :
 +			sizeof(struct hc_status_block_data_e2);
 +		data_size /= sizeof(u32);
 +		sb_data_p = CHIP_IS_E1x(bp) ?
 +			(u32 *)&sb_data_e1x :
 +			(u32 *)&sb_data_e2;
 +		/* copy sb data in here */
 +		for (j = 0; j < data_size; j++)
 +			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
 +				j * sizeof(u32));
 +
 +		if (!CHIP_IS_E1x(bp)) {
 +			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
 +				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
 +				"state(0x%x)\n",
 +				sb_data_e2.common.p_func.pf_id,
 +				sb_data_e2.common.p_func.vf_id,
 +				sb_data_e2.common.p_func.vf_valid,
 +				sb_data_e2.common.p_func.vnic_id,
 +				sb_data_e2.common.same_igu_sb_1b,
 +				sb_data_e2.common.state);
 +		} else {
 +			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
 +				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
 +				"state(0x%x)\n",
 +				sb_data_e1x.common.p_func.pf_id,
 +				sb_data_e1x.common.p_func.vf_id,
 +				sb_data_e1x.common.p_func.vf_valid,
 +				sb_data_e1x.common.p_func.vnic_id,
 +				sb_data_e1x.common.same_igu_sb_1b,
 +				sb_data_e1x.common.state);
 +		}
 +
 +		/* SB_SMs data */
 +		for (j = 0; j < HC_SB_MAX_SM; j++) {
 +			pr_cont("SM[%d] __flags (0x%x) "
 +			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
 +			       "time_to_expire (0x%x) "
 +			       "timer_value(0x%x)\n", j,
 +			       hc_sm_p[j].__flags,
 +			       hc_sm_p[j].igu_sb_id,
 +			       hc_sm_p[j].igu_seg_id,
 +			       hc_sm_p[j].time_to_expire,
 +			       hc_sm_p[j].timer_value);
 +		}
 +
 +		/* Indices data */
 +		for (j = 0; j < loop; j++) {
 +			pr_cont("INDEX[%d] flags (0x%x) "
 +					 "timeout (0x%x)\n", j,
 +			       hc_index_p[j].flags,
 +			       hc_index_p[j].timeout);
 +		}
 +	}
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	/* Rings */
 +	/* Rx */
 +	for_each_rx_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
 +		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
 +		for (j = start; j != end; j = RX_BD(j + 1)) {
 +			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
 +			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 +
 +			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
 +				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
 +		}
 +
 +		start = RX_SGE(fp->rx_sge_prod);
 +		end = RX_SGE(fp->last_max_sge);
 +		for (j = start; j != end; j = RX_SGE(j + 1)) {
 +			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 +			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
 +
 +			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
 +				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
 +		}
 +
 +		start = RCQ_BD(fp->rx_comp_cons - 10);
 +		end = RCQ_BD(fp->rx_comp_cons + 503);
 +		for (j = start; j != end; j = RCQ_BD(j + 1)) {
 +			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
 +
 +			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
 +				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
 +		}
 +	}
 +
 +	/* Tx */
 +	for_each_tx_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +		for_each_cos_in_tx_queue(fp, cos) {
 +			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
 +			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
 +			for (j = start; j != end; j = TX_BD(j + 1)) {
 +				struct sw_tx_bd *sw_bd =
 +					&txdata->tx_buf_ring[j];
 +
 +				BNX2X_ERR("fp%d: txdata %d, "
 +					  "packet[%x]=[%p,%x]\n",
 +					  i, cos, j, sw_bd->skb,
 +					  sw_bd->first_bd);
 +			}
 +
 +			start = TX_BD(txdata->tx_bd_cons - 10);
 +			end = TX_BD(txdata->tx_bd_cons + 254);
 +			for (j = start; j != end; j = TX_BD(j + 1)) {
 +				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
 +
 +				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
 +					  "[%x:%x:%x:%x]\n",
 +					  i, cos, j, tx_bd[0], tx_bd[1],
 +					  tx_bd[2], tx_bd[3]);
 +			}
 +		}
 +	}
 +#endif
 +	bnx2x_fw_dump(bp);
 +	bnx2x_mc_assert(bp);
 +	BNX2X_ERR("end crash dump -----------------\n");
 +}
 +
 +/*
 + * FLR Support for E2
 + *
 + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 + * initialization.
 + */
 +#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
 +#define FLR_WAIT_INTERAVAL	50	/* usec */
 +#define	FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
 +
 +struct pbf_pN_buf_regs {
 +	int pN;
 +	u32 init_crd;
 +	u32 crd;
 +	u32 crd_freed;
 +};
 +
 +struct pbf_pN_cmd_regs {
 +	int pN;
 +	u32 lines_occup;
 +	u32 lines_freed;
 +};
 +
 +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
 +				     struct pbf_pN_buf_regs *regs,
 +				     u32 poll_count)
 +{
 +	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
 +	u32 cur_cnt = poll_count;
 +
 +	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
 +	crd = crd_start = REG_RD(bp, regs->crd);
 +	init_crd = REG_RD(bp, regs->init_crd);
 +
 +	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
 +	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
 +	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
 +
 +	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
 +	       (init_crd - crd_start))) {
 +		if (cur_cnt--) {
 +			udelay(FLR_WAIT_INTERAVAL);
 +			crd = REG_RD(bp, regs->crd);
 +			crd_freed = REG_RD(bp, regs->crd_freed);
 +		} else {
 +			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
 +			   regs->pN);
 +			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
 +			   regs->pN, crd);
 +			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
 +			   regs->pN, crd_freed);
 +			break;
 +		}
 +	}
 +	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
 +	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
 +}
 +
 +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
 +				     struct pbf_pN_cmd_regs *regs,
 +				     u32 poll_count)
 +{
 +	u32 occup, to_free, freed, freed_start;
 +	u32 cur_cnt = poll_count;
 +
 +	occup = to_free = REG_RD(bp, regs->lines_occup);
 +	freed = freed_start = REG_RD(bp, regs->lines_freed);
 +
 +	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
 +	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
 +
 +	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
 +		if (cur_cnt--) {
 +			udelay(FLR_WAIT_INTERAVAL);
 +			occup = REG_RD(bp, regs->lines_occup);
 +			freed = REG_RD(bp, regs->lines_freed);
 +		} else {
 +			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
 +			   regs->pN);
 +			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
 +			   regs->pN, occup);
 +			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
 +			   regs->pN, freed);
 +			break;
 +		}
 +	}
 +	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
 +	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
 +}
 +
 +static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
 +				     u32 expected, u32 poll_count)
 +{
 +	u32 cur_cnt = poll_count;
 +	u32 val;
 +
 +	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
 +		udelay(FLR_WAIT_INTERAVAL);
 +
 +	return val;
 +}
 +
 +static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
 +						  char *msg, u32 poll_cnt)
 +{
 +	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
 +	if (val != 0) {
 +		BNX2X_ERR("%s usage count=%d\n", msg, val);
 +		return 1;
 +	}
 +	return 0;
 +}
 +
 +static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
 +{
 +	/* adjust polling timeout */
 +	if (CHIP_REV_IS_EMUL(bp))
 +		return FLR_POLL_CNT * 2000;
 +
 +	if (CHIP_REV_IS_FPGA(bp))
 +		return FLR_POLL_CNT * 120;
 +
 +	return FLR_POLL_CNT;
 +}
 +
 +static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
 +{
 +	struct pbf_pN_cmd_regs cmd_regs[] = {
 +		{0, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_OCCUPANCY_Q0 :
 +			PBF_REG_P0_TQ_OCCUPANCY,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
 +			PBF_REG_P0_TQ_LINES_FREED_CNT},
 +		{1, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_OCCUPANCY_Q1 :
 +			PBF_REG_P1_TQ_OCCUPANCY,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
 +			PBF_REG_P1_TQ_LINES_FREED_CNT},
 +		{4, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_OCCUPANCY_LB_Q :
 +			PBF_REG_P4_TQ_OCCUPANCY,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
 +			PBF_REG_P4_TQ_LINES_FREED_CNT}
 +	};
 +
 +	struct pbf_pN_buf_regs buf_regs[] = {
 +		{0, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INIT_CRD_Q0 :
 +			PBF_REG_P0_INIT_CRD ,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_CREDIT_Q0 :
 +			PBF_REG_P0_CREDIT,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
 +			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
 +		{1, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INIT_CRD_Q1 :
 +			PBF_REG_P1_INIT_CRD,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_CREDIT_Q1 :
 +			PBF_REG_P1_CREDIT,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
 +			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
 +		{4, (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INIT_CRD_LB_Q :
 +			PBF_REG_P4_INIT_CRD,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_CREDIT_LB_Q :
 +			PBF_REG_P4_CREDIT,
 +		    (CHIP_IS_E3B0(bp)) ?
 +			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
 +			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
 +	};
 +
 +	int i;
 +
 +	/* Verify the command queues are flushed P0, P1, P4 */
 +	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
 +		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
 +
 +
 +	/* Verify the transmission buffers are flushed P0, P1, P4 */
 +	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
 +		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
 +}
 +
 +#define OP_GEN_PARAM(param) \
 +	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
 +
 +#define OP_GEN_TYPE(type) \
 +	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
 +
 +#define OP_GEN_AGG_VECT(index) \
 +	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 +
 +
 +static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
 +					 u32 poll_cnt)
 +{
 +	struct sdm_op_gen op_gen = {0};
 +
 +	u32 comp_addr = BAR_CSTRORM_INTMEM +
 +			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
 +	int ret = 0;
 +
 +	if (REG_RD(bp, comp_addr)) {
 +		BNX2X_ERR("Cleanup complete is not 0\n");
 +		return 1;
 +	}
 +
 +	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
 +	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
 +	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
 +	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 +
 +	DP(BNX2X_MSG_SP, "FW Final cleanup\n");
 +	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
 +
 +	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
 +		BNX2X_ERR("FW final cleanup did not succeed\n");
 +		ret = 1;
 +	}
 +	/* Zero completion for next FLR */
 +	REG_WR(bp, comp_addr, 0);
 +
 +	return ret;
 +}
 +
 +static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 +{
 +	int pos;
 +	u16 status;
 +
 +	pos = pci_pcie_cap(dev);
 +	if (!pos)
 +		return false;
 +
 +	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
 +	return status & PCI_EXP_DEVSTA_TRPND;
 +}
 +
 +/* PF FLR specific routines */
 +static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
 +{
 +
 +	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			CFC_REG_NUM_LCIDS_INSIDE_PF,
 +			"CFC PF usage counter timed out",
 +			poll_cnt))
 +		return 1;
 +
 +
 +	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			DORQ_REG_PF_USAGE_CNT,
 +			"DQ PF usage counter timed out",
 +			poll_cnt))
 +		return 1;
 +
 +	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
 +			"QM PF usage counter timed out",
 +			poll_cnt))
 +		return 1;
 +
 +	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
 +			"Timers VNIC usage counter timed out",
 +			poll_cnt))
 +		return 1;
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
 +			"Timers NUM_SCANS usage counter timed out",
 +			poll_cnt))
 +		return 1;
 +
 +	/* Wait for DMAE PF usage counter to zero */
 +	if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +			dmae_reg_go_c[INIT_DMAE_C(bp)],
 +			"DMAE command register timed out",
 +			poll_cnt))
 +		return 1;
 +
 +	return 0;
 +}
 +
 +static void bnx2x_hw_enable_status(struct bnx2x *bp)
 +{
 +	u32 val;
 +
 +	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
 +	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
 +
 +	val = REG_RD(bp, PBF_REG_DISABLE_PF);
 +	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
 +
 +	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
 +	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
 +
 +	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
 +	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
 +
 +	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
 +	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
 +
 +	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
 +	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
 +
 +	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
 +	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
 +
 +	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
 +	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
 +	   val);
 +}
 +
 +static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
 +{
 +	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 +
 +	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
 +
 +	/* Re-enable PF target read access */
 +	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 +
 +	/* Poll HW usage counters */
 +	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
 +		return -EBUSY;
 +
 +	/* Zero the igu 'trailing edge' and 'leading edge' */
 +
 +	/* Send the FW cleanup command */
 +	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
 +		return -EBUSY;
 +
 +	/* ATC cleanup */
 +
 +	/* Verify TX hw is flushed */
 +	bnx2x_tx_hw_flushed(bp, poll_cnt);
 +
 +	/* Wait 100ms (not adjusted according to platform) */
 +	msleep(100);
 +
 +	/* Verify no pending pci transactions */
 +	if (bnx2x_is_pcie_pending(bp->pdev))
 +		BNX2X_ERR("PCIE Transactions still pending\n");
 +
 +	/* Debug */
 +	bnx2x_hw_enable_status(bp);
 +
 +	/*
 +	 * Master enable - Due to WB DMAE writes performed before this
 +	 * register is re-initialized as part of the regular function init
 +	 */
 +	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +
 +	return 0;
 +}
 +
 +static void bnx2x_hc_int_enable(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 +	u32 val = REG_RD(bp, addr);
 +	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 +
 +	if (msix) {
 +		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +			 HC_CONFIG_0_REG_INT_LINE_EN_0);
 +		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +	} else if (msi) {
 +		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
 +		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +	} else {
 +		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +			HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +
 +		if (!CHIP_IS_E1(bp)) {
 +			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 +			   val, port, addr);
 +
 +			REG_WR(bp, addr, val);
 +
 +			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
 +		}
 +	}
 +
 +	if (CHIP_IS_E1(bp))
 +		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
 +
 +	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
 +	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 +
 +	REG_WR(bp, addr, val);
 +	/*
 +	 * Ensure that HC_CONFIG is written before leading/trailing edge config
 +	 */
 +	mmiowb();
 +	barrier();
 +
 +	if (!CHIP_IS_E1(bp)) {
 +		/* init leading/trailing edge */
 +		if (IS_MF(bp)) {
 +			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 +			if (bp->port.pmf)
 +				/* enable nig and gpio3 attention */
 +				val |= 0x1100;
 +		} else
 +			val = 0xffff;
 +
 +		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 +		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 +	}
 +
 +	/* Make sure that interrupts are indeed enabled from here on */
 +	mmiowb();
 +}
 +
 +static void bnx2x_igu_int_enable(struct bnx2x *bp)
 +{
 +	u32 val;
 +	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 +
 +	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +
 +	if (msix) {
 +		val &= ~(IGU_PF_CONF_INT_LINE_EN |
 +			 IGU_PF_CONF_SINGLE_ISR_EN);
 +		val |= (IGU_PF_CONF_FUNC_EN |
 +			IGU_PF_CONF_MSI_MSIX_EN |
 +			IGU_PF_CONF_ATTN_BIT_EN);
 +	} else if (msi) {
 +		val &= ~IGU_PF_CONF_INT_LINE_EN;
 +		val |= (IGU_PF_CONF_FUNC_EN |
 +			IGU_PF_CONF_MSI_MSIX_EN |
 +			IGU_PF_CONF_ATTN_BIT_EN |
 +			IGU_PF_CONF_SINGLE_ISR_EN);
 +	} else {
 +		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
 +		val |= (IGU_PF_CONF_FUNC_EN |
 +			IGU_PF_CONF_INT_LINE_EN |
 +			IGU_PF_CONF_ATTN_BIT_EN |
 +			IGU_PF_CONF_SINGLE_ISR_EN);
 +	}
 +
 +	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
 +	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 +
 +	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +
 +	barrier();
 +
 +	/* init leading/trailing edge */
 +	if (IS_MF(bp)) {
 +		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 +		if (bp->port.pmf)
 +			/* enable nig and gpio3 attention */
 +			val |= 0x1100;
 +	} else
 +		val = 0xffff;
 +
 +	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 +	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
 +
 +	/* Make sure that interrupts are indeed enabled from here on */
 +	mmiowb();
 +}
 +
 +void bnx2x_int_enable(struct bnx2x *bp)
 +{
 +	if (bp->common.int_block == INT_BLOCK_HC)
 +		bnx2x_hc_int_enable(bp);
 +	else
 +		bnx2x_igu_int_enable(bp);
 +}
 +
 +static void bnx2x_hc_int_disable(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 +	u32 val = REG_RD(bp, addr);
 +
 +	/*
 +	 * In E1 we must use only PCI configuration space to disable the
 +	 * MSI/MSIX capability.
 +	 * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
 +	 */
 +	if (CHIP_IS_E1(bp)) {
 +		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
 +		 * use the mask register to prevent the HC from sending
 +		 * interrupts after we exit the function.
 +		 */
 +		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
 +
 +		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +	} else
 +		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +
 +	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 +	   val, port, addr);
 +
 +	/* flush all outstanding writes */
 +	mmiowb();
 +
 +	REG_WR(bp, addr, val);
 +	if (REG_RD(bp, addr) != val)
 +		BNX2X_ERR("BUG! proper val not read from HC!\n");
 +}
 +
 +static void bnx2x_igu_int_disable(struct bnx2x *bp)
 +{
 +	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +
 +	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
 +		 IGU_PF_CONF_INT_LINE_EN |
 +		 IGU_PF_CONF_ATTN_BIT_EN);
 +
 +	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
 +
 +	/* flush all outstanding writes */
 +	mmiowb();
 +
 +	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
 +		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 +}
 +
 +void bnx2x_int_disable(struct bnx2x *bp)
 +{
 +	if (bp->common.int_block == INT_BLOCK_HC)
 +		bnx2x_hc_int_disable(bp);
 +	else
 +		bnx2x_igu_int_disable(bp);
 +}
 +
 +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 +{
 +	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +	int i, offset;
 +
 +	if (disable_hw)
 +		/* prevent the HW from sending interrupts */
 +		bnx2x_int_disable(bp);
 +
 +	/* make sure all ISRs are done */
 +	if (msix) {
 +		synchronize_irq(bp->msix_table[0].vector);
 +		offset = 1;
 +#ifdef BCM_CNIC
 +		offset++;
 +#endif
 +		for_each_eth_queue(bp, i)
 +			synchronize_irq(bp->msix_table[offset++].vector);
 +	} else
 +		synchronize_irq(bp->pdev->irq);
 +
 +	/* make sure sp_task is not running */
 +	cancel_delayed_work(&bp->sp_task);
 +	cancel_delayed_work(&bp->period_task);
 +	flush_workqueue(bnx2x_wq);
 +}
 +
 +/* fast path */
 +
 +/*
 + * General service functions
 + */
 +
 +/* Return true if succeeded to acquire the lock */
 +static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +	u32 lock_status;
 +	u32 resource_bit = (1 << resource);
 +	int func = BP_FUNC(bp);
 +	u32 hw_lock_control_reg;
 +
 +	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
 +
 +	/* Validating that the resource is within range */
 +	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +		DP(NETIF_MSG_HW,
 +		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +		return false;
 +	}
 +
 +	if (func <= 5)
 +		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +	else
 +		hw_lock_control_reg =
 +				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +
 +	/* Try to acquire the lock */
 +	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 +	lock_status = REG_RD(bp, hw_lock_control_reg);
 +	if (lock_status & resource_bit)
 +		return true;
 +
 +	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
 +	return false;
 +}
 +
 +/**
 + * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 + *
 + * @bp:	driver handle
 + *
 + * Returns the recovery leader resource id according to the engine this
 + * function belongs to. Currently only 2 engines are supported.
 + */
 +static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
 +{
 +	if (BP_PATH(bp))
 +		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
 +	else
 +		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
 +}
 +
 +/**
 + * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 + *
 + * @bp: driver handle
 + *
 + * Tries to acquire a leader lock for the current engine.
 + */
 +static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 +{
 +	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 +}
 +
 +#ifdef BCM_CNIC
 +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
 +#endif
 +
 +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 +{
 +	struct bnx2x *bp = fp->bp;
 +	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 +	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 +	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
 +	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
 +
 +	DP(BNX2X_MSG_SP,
 +	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
 +	   fp->index, cid, command, bp->state,
 +	   rr_cqe->ramrod_cqe.ramrod_type);
 +
 +	switch (command) {
 +	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
 +		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_UPDATE;
 +		break;
 +
 +	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
 +		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_SETUP;
 +		break;
 +
 +	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
 +		DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 +		break;
 +
 +	case (RAMROD_CMD_ID_ETH_HALT):
 +		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_HALT;
 +		break;
 +
 +	case (RAMROD_CMD_ID_ETH_TERMINATE):
 +		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_TERMINATE;
 +		break;
 +
 +	case (RAMROD_CMD_ID_ETH_EMPTY):
 +		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
 +		drv_cmd = BNX2X_Q_CMD_EMPTY;
 +		break;
 +
 +	default:
 +		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
 +			  command, fp->index);
 +		return;
 +	}
 +
 +	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
 +	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
 +		/* q_obj->complete_cmd() failure means that this was
 +		 * an unexpected completion.
 +		 *
 +		 * In this case we don't want to increase the bp->spq_left
 +		 * because apparently we haven't sent this command in the
 +		 * first place.
 +		 */
 +#ifdef BNX2X_STOP_ON_ERROR
 +		bnx2x_panic();
 +#else
 +		return;
 +#endif
 +
 +	smp_mb__before_atomic_inc();
 +	atomic_inc(&bp->cq_spq_left);
 +	/* push the change in bp->spq_left and towards the memory */
 +	smp_mb__after_atomic_inc();
 +
 +	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 +
 +	return;
 +}
 +
 +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
 +{
 +	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
 +
 +	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
 +				 start);
 +}
 +
 +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 +{
 +	struct bnx2x *bp = netdev_priv(dev_instance);
 +	u16 status = bnx2x_ack_int(bp);
 +	u16 mask;
 +	int i;
 +	u8 cos;
 +
 +	/* Return here if interrupt is shared and it's not for us */
 +	if (unlikely(status == 0)) {
 +		DP(NETIF_MSG_INTR, "not our interrupt!\n");
 +		return IRQ_NONE;
 +	}
 +	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	if (unlikely(bp->panic))
 +		return IRQ_HANDLED;
 +#endif
 +
 +	for_each_eth_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +		mask = 0x2 << (fp->index + CNIC_PRESENT);
 +		if (status & mask) {
 +			/* Handle Rx or Tx according to SB id */
 +			prefetch(fp->rx_cons_sb);
 +			for_each_cos_in_tx_queue(fp, cos)
 +				prefetch(fp->txdata[cos].tx_cons_sb);
 +			prefetch(&fp->sb_running_index[SM_RX_ID]);
 +			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 +			status &= ~mask;
 +		}
 +	}
 +
 +#ifdef BCM_CNIC
 +	mask = 0x2;
 +	if (status & (mask | 0x1)) {
 +		struct cnic_ops *c_ops = NULL;
 +
 +		if (likely(bp->state == BNX2X_STATE_OPEN)) {
 +			rcu_read_lock();
 +			c_ops = rcu_dereference(bp->cnic_ops);
 +			if (c_ops)
 +				c_ops->cnic_handler(bp->cnic_data, NULL);
 +			rcu_read_unlock();
 +		}
 +
 +		status &= ~mask;
 +	}
 +#endif
 +
 +	if (unlikely(status & 0x1)) {
 +		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +
 +		status &= ~0x1;
 +		if (!status)
 +			return IRQ_HANDLED;
 +	}
 +
 +	if (unlikely(status))
 +		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 +		   status);
 +
 +	return IRQ_HANDLED;
 +}
 +
 +/* Link */
 +
 +/*
 + * General service functions
 + */
 +
 +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +	u32 lock_status;
 +	u32 resource_bit = (1 << resource);
 +	int func = BP_FUNC(bp);
 +	u32 hw_lock_control_reg;
 +	int cnt;
 +
 +	/* Validating that the resource is within range */
 +	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +		DP(NETIF_MSG_HW,
 +		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +		return -EINVAL;
 +	}
 +
 +	if (func <= 5) {
 +		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +	} else {
 +		hw_lock_control_reg =
 +				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +	}
 +
 +	/* Validating that the resource is not already taken */
 +	lock_status = REG_RD(bp, hw_lock_control_reg);
 +	if (lock_status & resource_bit) {
 +		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 +		   lock_status, resource_bit);
 +		return -EEXIST;
 +	}
 +
 +	/* Try for 5 seconds, polling every 5ms */
 +	for (cnt = 0; cnt < 1000; cnt++) {
 +		/* Try to acquire the lock */
 +		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 +		lock_status = REG_RD(bp, hw_lock_control_reg);
 +		if (lock_status & resource_bit)
 +			return 0;
 +
 +		msleep(5);
 +	}
 +	DP(NETIF_MSG_HW, "Timeout\n");
 +	return -EAGAIN;
 +}
 +
 +int bnx2x_release_leader_lock(struct bnx2x *bp)
 +{
 +	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 +}
 +
 +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +	u32 lock_status;
 +	u32 resource_bit = (1 << resource);
 +	int func = BP_FUNC(bp);
 +	u32 hw_lock_control_reg;
 +
 +	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
 +
 +	/* Validating that the resource is within range */
 +	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +		DP(NETIF_MSG_HW,
 +		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +		return -EINVAL;
 +	}
 +
 +	if (func <= 5) {
 +		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +	} else {
 +		hw_lock_control_reg =
 +				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +	}
 +
 +	/* Validating that the resource is currently taken */
 +	lock_status = REG_RD(bp, hw_lock_control_reg);
 +	if (!(lock_status & resource_bit)) {
 +		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 +		   lock_status, resource_bit);
 +		return -EFAULT;
 +	}
 +
 +	REG_WR(bp, hw_lock_control_reg, resource_bit);
 +	return 0;
 +}
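Acquire and release are meant to bracket the actual hardware access; the
GPIO helpers below follow exactly this pattern:

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* ... read-modify-write MISC_REG_GPIO ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);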
 +
 +
 +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
 +{
 +	/* The GPIO should be swapped if swap register is set and active */
 +	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +	int gpio_shift = gpio_num +
 +			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +	u32 gpio_mask = (1 << gpio_shift);
 +	u32 gpio_reg;
 +	int value;
 +
 +	if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +		return -EINVAL;
 +	}
 +
 +	/* read GPIO value */
 +	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 +
 +	/* get the requested pin value */
 +	if ((gpio_reg & gpio_mask) == gpio_mask)
 +		value = 1;
 +	else
 +		value = 0;
 +
 +	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
 +
 +	return value;
 +}
 +
 +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 +{
 +	/* The GPIO should be swapped if swap register is set and active */
 +	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +	int gpio_shift = gpio_num +
 +			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +	u32 gpio_mask = (1 << gpio_shift);
 +	u32 gpio_reg;
 +
 +	if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +		return -EINVAL;
 +	}
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +	/* read GPIO and mask except the float bits */
 +	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 +
 +	switch (mode) {
 +	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
 +		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
 +		   gpio_num, gpio_shift);
 +		/* clear FLOAT and set CLR */
 +		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
 +		break;
 +
 +	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
 +		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
 +		   gpio_num, gpio_shift);
 +		/* clear FLOAT and set SET */
 +		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 +		break;
 +
 +	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 +		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 +		   gpio_num, gpio_shift);
 +		/* set FLOAT */
 +		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +		break;
 +
 +	default:
 +		break;
 +	}
 +
 +	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +	return 0;
 +}
 +
 +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
 +{
 +	u32 gpio_reg = 0;
 +	int rc = 0;
 +
 +	/* Any port swapping should be handled by caller. */
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +	/* read GPIO and clear the FLOAT/CLR/SET bits for the given pins */
 +	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 +	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
 +	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
 +	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
 +
 +	switch (mode) {
 +	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
 +		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
 +		/* set CLR */
 +		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
 +		break;
 +
 +	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
 +		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
 +		/* set SET */
 +		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
 +		break;
 +
 +	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 +		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
 +		/* set FLOAT */
 +		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
 +		break;
 +
 +	default:
 +		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
 +		rc = -EINVAL;
 +		break;
 +	}
 +
 +	if (rc == 0)
 +		REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 +
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +	return rc;
 +}
 +
 +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 +{
 +	/* The GPIO should be swapped if swap register is set and active */
 +	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +	int gpio_shift = gpio_num +
 +			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +	u32 gpio_mask = (1 << gpio_shift);
 +	u32 gpio_reg;
 +
 +	if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +		return -EINVAL;
 +	}
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +	/* read GPIO int */
 +	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
 +
 +	switch (mode) {
 +	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
 +		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
 +				   "output low\n", gpio_num, gpio_shift);
 +		/* clear SET and set CLR */
 +		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
 +		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
 +		break;
 +
 +	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
 +		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
 +				   "output high\n", gpio_num, gpio_shift);
 +		/* clear CLR and set SET */
 +		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
 +		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
 +		break;
 +
 +	default:
 +		break;
 +	}
 +
 +	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +	return 0;
 +}
 +
 +static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 +{
 +	u32 spio_mask = (1 << spio_num);
 +	u32 spio_reg;
 +
 +	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
 +	    (spio_num > MISC_REGISTERS_SPIO_7)) {
 +		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
 +		return -EINVAL;
 +	}
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 +	/* read SPIO and mask out everything except the float bits */
 +	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 +
 +	switch (mode) {
 +	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 +		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 +		/* clear FLOAT and set CLR */
 +		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 +		break;
 +
 +	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 +		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 +		/* clear FLOAT and set SET */
 +		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
 +		break;
 +
 +	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
 +		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
 +		/* set FLOAT */
 +		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +		break;
 +
 +	default:
 +		break;
 +	}
 +
 +	REG_WR(bp, MISC_REG_SPIO, spio_reg);
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 +
 +	return 0;
 +}
 +
 +void bnx2x_calc_fc_adv(struct bnx2x *bp)
 +{
 +	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +	switch (bp->link_vars.ieee_fc &
 +		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
 +	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
 +		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 +						   ADVERTISED_Pause);
 +		break;
 +
 +	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
 +		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
 +						  ADVERTISED_Pause);
 +		break;
 +
 +	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
 +		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
 +		break;
 +
 +	default:
 +		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 +						   ADVERTISED_Pause);
 +		break;
 +	}
 +}
 +
 +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 +{
 +	if (!BP_NOMCP(bp)) {
 +		u8 rc;
 +		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
 +		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
 +		/*
 +		 * Initialize link parameters structure variables.
 +		 * It is recommended to turn off RX FC for jumbo frames
 +		 * for better performance.
 +		 */
 +		if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
 +			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
 +		else
 +			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 +
 +		bnx2x_acquire_phy_lock(bp);
 +
 +		if (load_mode == LOAD_DIAG) {
 +			struct link_params *lp = &bp->link_params;
 +			lp->loopback_mode = LOOPBACK_XGXS;
 +			/* do PHY loopback at 10G speed, if possible */
 +			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
 +				if (lp->speed_cap_mask[cfx_idx] &
 +				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 +					lp->req_line_speed[cfx_idx] =
 +					SPEED_10000;
 +				else
 +					lp->req_line_speed[cfx_idx] =
 +					SPEED_1000;
 +			}
 +		}
 +
 +		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 +
 +		bnx2x_release_phy_lock(bp);
 +
 +		bnx2x_calc_fc_adv(bp);
 +
 +		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
 +			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +			bnx2x_link_report(bp);
 +		} else
 +			queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
 +		return rc;
 +	}
 +	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 +	return -EINVAL;
 +}
 +
 +void bnx2x_link_set(struct bnx2x *bp)
 +{
 +	if (!BP_NOMCP(bp)) {
 +		bnx2x_acquire_phy_lock(bp);
 +		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 +		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 +		bnx2x_release_phy_lock(bp);
 +
 +		bnx2x_calc_fc_adv(bp);
 +	} else
 +		BNX2X_ERR("Bootcode is missing - can not set link\n");
 +}
 +
 +static void bnx2x__link_reset(struct bnx2x *bp)
 +{
 +	if (!BP_NOMCP(bp)) {
 +		bnx2x_acquire_phy_lock(bp);
 +		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 +		bnx2x_release_phy_lock(bp);
 +	} else
 +		BNX2X_ERR("Bootcode is missing - can not reset link\n");
 +}
 +
 +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 +{
 +	u8 rc = 0;
 +
 +	if (!BP_NOMCP(bp)) {
 +		bnx2x_acquire_phy_lock(bp);
 +		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
 +				     is_serdes);
 +		bnx2x_release_phy_lock(bp);
 +	} else
 +		BNX2X_ERR("Bootcode is missing - can not test link\n");
 +
 +	return rc;
 +}
 +
 +static void bnx2x_init_port_minmax(struct bnx2x *bp)
 +{
 +	u32 r_param = bp->link_vars.line_speed / 8;
 +	u32 fair_periodic_timeout_usec;
 +	u32 t_fair;
 +
 +	memset(&(bp->cmng.rs_vars), 0,
 +	       sizeof(struct rate_shaping_vars_per_port));
 +	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
 +
 +	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
 +	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
 +
 +	/* this is the threshold below which no timer arming will occur.
 +	   The 1.25 coefficient makes the threshold a little bigger than
 +	   the real time, to compensate for timer inaccuracy */
 +	bp->cmng.rs_vars.rs_threshold =
 +				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
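 +	/* Worked example (assuming line_speed is in Mbps, as the
 +	 * SPEED_10000 usage elsewhere in this file suggests): at 10G,
 +	 * r_param = 10000 / 8 = 1250 bytes/usec, so rs_threshold is
 +	 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 1.25 bytes.
 +	 */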
 +
 +	/* resolution of fairness timer */
 +	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
 +	/* for 10G it is 1000 usec, for 1G it is 10000 usec */
 +	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
 +
 +	/* this is the threshold below which we won't arm the timer anymore */
 +	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
 +
 +	/* we multiply by 1e3/8 to get bytes/msec.
 +	   We don't want the credits to exceed the equivalent of
 +	   t_fair*FAIR_MEM (the algorithm's resolution) */
 +	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
 +	/* since each tick is 4 usec */
 +	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
 +}
 +
 +/* Calculates the sum of vn_min_rates.
 +   It's needed for further normalizing of the min_rates.
 +   Returns:
 +     sum of vn_min_rates.
 +       or
 +     0 - if all the min_rates are 0.
 +     In the latter case the fairness algorithm should be deactivated.
 +     If not all min_rates are zero then those that are zeroes will be set to 1.
 + */
 +static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 +{
 +	int all_zero = 1;
 +	int vn;
 +
 +	bp->vn_weight_sum = 0;
 +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 +		u32 vn_cfg = bp->mf_config[vn];
 +		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 +				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 +
 +		/* Skip hidden vns */
 +		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
 +			continue;
 +
 +		/* If min rate is zero - set it to 1 */
 +		if (!vn_min_rate)
 +			vn_min_rate = DEF_MIN_RATE;
 +		else
 +			all_zero = 0;
 +
 +		bp->vn_weight_sum += vn_min_rate;
 +	}
 +
 +	/* if ETS or all min rates are zeros - disable fairness */
 +	if (BNX2X_IS_ETS_ENABLED(bp)) {
 +		bp->cmng.flags.cmng_enables &=
 +					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
 +	} else if (all_zero) {
 +		bp->cmng.flags.cmng_enables &=
 +					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
 +		   "  fairness will be disabled\n");
 +	} else
 +		bp->cmng.flags.cmng_enables |=
 +					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +}
 +
 +/* returns func by VN for current port */
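 +/* e.g. vn 2 on port 1 maps to func 2 * 2 + 1 = 5 */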
 +static inline int func_by_vn(struct bnx2x *bp, int vn)
 +{
 +	return 2 * vn + BP_PORT(bp);
 +}
 +
 +static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 +{
 +	struct rate_shaping_vars_per_vn m_rs_vn;
 +	struct fairness_vars_per_vn m_fair_vn;
 +	u32 vn_cfg = bp->mf_config[vn];
 +	int func = func_by_vn(bp, vn);
 +	u16 vn_min_rate, vn_max_rate;
 +	int i;
 +
 +	/* If function is hidden - set min and max to zeroes */
 +	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
 +		vn_min_rate = 0;
 +		vn_max_rate = 0;
 +
 +	} else {
 +		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 +
 +		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 +				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 +		/* If fairness is enabled (not all min rates are zeroes) and
 +		   the current min rate is zero - set it to 1.
 +		   This is a requirement of the algorithm. */
 +		if (bp->vn_weight_sum && (vn_min_rate == 0))
 +			vn_min_rate = DEF_MIN_RATE;
 +
 +		if (IS_MF_SI(bp))
 +			/* maxCfg is in percent of link speed */
 +			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
 +		else
 +			/* maxCfg is absolute in 100Mb units */
 +			vn_max_rate = maxCfg * 100;
 +	}
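 +
 +	/* Worked example of the two interpretations above (hypothetical
 +	 * maxCfg value): maxCfg = 50 on a 10000 Mbps link yields
 +	 * vn_max_rate = 10000 * 50 / 100 = 5000 in SI mode and
 +	 * 50 * 100 = 5000 in SD mode.
 +	 */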
 +
 +	DP(NETIF_MSG_IFUP,
 +	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
 +	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 +
 +	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
 +	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
 +
 +	/* global vn counter - maximal Mbps for this vn */
 +	m_rs_vn.vn_counter.rate = vn_max_rate;
 +
 +	/* quota - number of bytes transmitted in this period */
 +	m_rs_vn.vn_counter.quota =
 +				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
 +
 +	if (bp->vn_weight_sum) {
 +		/* credit for each period of the fairness algorithm:
 +		   number of bytes in T_FAIR (the vns share the port rate).
 +		   vn_weight_sum should not be larger than 10000, thus
 +		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
 +		   than zero */
 +		m_fair_vn.vn_credit_delta =
 +			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
 +						   (8 * bp->vn_weight_sum))),
 +			      (bp->cmng.fair_vars.fair_threshold +
 +							MIN_ABOVE_THRESH));
 +		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
 +		   m_fair_vn.vn_credit_delta);
 +	}
 +
 +	/* Store it to internal memory */
 +	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
 +		REG_WR(bp, BAR_XSTRORM_INTMEM +
 +		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
 +		       ((u32 *)(&m_rs_vn))[i]);
 +
 +	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
 +		REG_WR(bp, BAR_XSTRORM_INTMEM +
 +		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
 +		       ((u32 *)(&m_fair_vn))[i]);
 +}
 +
 +static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 +{
 +	if (CHIP_REV_IS_SLOW(bp))
 +		return CMNG_FNS_NONE;
 +	if (IS_MF(bp))
 +		return CMNG_FNS_MINMAX;
 +
 +	return CMNG_FNS_NONE;
 +}
 +
 +void bnx2x_read_mf_cfg(struct bnx2x *bp)
 +{
 +	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
 +
 +	if (BP_NOMCP(bp))
 +		return; /* what should be the default value in this case? */
 +
 +	/* For 2 port configuration the absolute function number formula
 +	 * is:
 +	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
 +	 *
 +	 *      and there are 4 functions per port
 +	 *
 +	 * For 4 port configuration it is
 +	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
 +	 *
 +	 *      and there are 2 functions per port
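 +	 *
 +	 * e.g. in 4-port mode, vn 1 on port 1, path 0 gives
 +	 *      abs_func = 4 * 1 + 2 * 1 + 0 = 6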
 +	 */
 +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 +		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 +
 +		if (func >= E1H_FUNC_MAX)
 +			break;
 +
 +		bp->mf_config[vn] =
 +			MF_CFG_RD(bp, func_mf_config[func].config);
 +	}
 +}
 +
 +static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 +{
 +	if (cmng_type == CMNG_FNS_MINMAX) {
 +		int vn;
 +
 +		/* clear cmng_enables */
 +		bp->cmng.flags.cmng_enables = 0;
 +
 +		/* read mf conf from shmem */
 +		if (read_cfg)
 +			bnx2x_read_mf_cfg(bp);
 +
 +		/* Init rate shaping and fairness contexts */
 +		bnx2x_init_port_minmax(bp);
 +
 +		/* calculate vn_weight_sum and enable fairness if it is not 0 */
 +		bnx2x_calc_vn_weight_sum(bp);
 +
 +		/* calculate and set min-max rate for each vn */
 +		if (bp->port.pmf)
 +			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
 +				bnx2x_init_vn_minmax(bp, vn);
 +
 +		/* always enable rate shaping and fairness */
 +		bp->cmng.flags.cmng_enables |=
 +					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
 +		if (!bp->vn_weight_sum)
 +			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
 +				   "  fairness will be disabled\n");
 +		return;
 +	}
 +
 +	/* rate shaping and fairness are disabled */
 +	DP(NETIF_MSG_IFUP,
 +	   "rate shaping and fairness are disabled\n");
 +}
 +
 +static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 +{
 +	int func;
 +	int vn;
 +
 +	/* Set the attention towards other drivers on the same port */
 +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 +		if (vn == BP_VN(bp))
 +			continue;
 +
 +		func = func_by_vn(bp, vn);
 +		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
 +		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
 +	}
 +}
 +
 +/* This function is called upon link interrupt */
 +static void bnx2x_link_attn(struct bnx2x *bp)
 +{
 +	/* Make sure that we are synced with the current statistics */
 +	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +	bnx2x_link_update(&bp->link_params, &bp->link_vars);
 +
 +	if (bp->link_vars.link_up) {
 +
 +		/* dropless flow control */
 +		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
 +			int port = BP_PORT(bp);
 +			u32 pause_enabled = 0;
 +
 +			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +				pause_enabled = 1;
 +
 +			REG_WR(bp, BAR_USTRORM_INTMEM +
 +			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
 +			       pause_enabled);
 +		}
 +
 +		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
 +			struct host_port_stats *pstats;
 +
 +			pstats = bnx2x_sp(bp, port_stats);
 +			/* reset old mac stats */
 +			memset(&(pstats->mac_stx[0]), 0,
 +			       sizeof(struct mac_stx));
 +		}
 +		if (bp->state == BNX2X_STATE_OPEN)
 +			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +	}
 +
 +	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
 +		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
 +
 +		if (cmng_fns != CMNG_FNS_NONE) {
 +			bnx2x_cmng_fns_init(bp, false, cmng_fns);
 +			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +		} else
 +			/* rate shaping and fairness are disabled */
 +			DP(NETIF_MSG_IFUP,
 +			   "single function mode without fairness\n");
 +	}
 +
 +	__bnx2x_link_report(bp);
 +
 +	if (IS_MF(bp))
 +		bnx2x_link_sync_notify(bp);
 +}
 +
 +void bnx2x__link_status_update(struct bnx2x *bp)
 +{
 +	if (bp->state != BNX2X_STATE_OPEN)
 +		return;
 +
 +	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 +
 +	if (bp->link_vars.link_up)
 +		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +	else
 +		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +	/* indicate link status */
 +	bnx2x_link_report(bp);
 +}
 +
 +static void bnx2x_pmf_update(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 val;
 +
 +	bp->port.pmf = 1;
 +	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 +
 +	/*
 +	 * We need the barrier to ensure the ordering between writing to
 +	 * bp->port.pmf here and reading it in bnx2x_periodic_task().
 +	 */
 +	smp_mb();
 +
 +	/* queue a periodic task */
 +	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +
 +	bnx2x_dcbx_pmf_update(bp);
 +
 +	/* enable nig attention */
 +	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
 +	if (bp->common.int_block == INT_BLOCK_HC) {
 +		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 +		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 +	} else if (!CHIP_IS_E1x(bp)) {
 +		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 +		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
 +	}
 +
 +	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
 +}
 +
 +/* end of Link */
 +
 +/* slow path */
 +
 +/*
 + * General service functions
 + */
 +
 +/* send the MCP a request, block until there is a reply */
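 +/*
 + * A sketch of the handshake implemented below: bump the sequence number,
 + * write the command together with the sequence into the driver mailbox,
 + * then poll the firmware mailbox until the firmware echoes the same
 + * sequence back or the poll times out.
 + */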
 +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 +{
 +	int mb_idx = BP_FW_MB_IDX(bp);
 +	u32 seq;
 +	u32 rc = 0;
 +	u32 cnt = 1;
 +	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 +
 +	mutex_lock(&bp->fw_mb_mutex);
 +	seq = ++bp->fw_seq;
 +	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
 +	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
 +
 +	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
 +			(command | seq), param);
 +
 +	do {
 +		/* let the FW do its magic ... */
 +		msleep(delay);
 +
 +		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
 +
 +		/* Give the FW up to 5 seconds (500*10ms) */
 +	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
 +
 +	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
 +	   cnt*delay, rc, seq);
 +
 +	/* is this a reply to our command? */
 +	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
 +		rc &= FW_MSG_CODE_MASK;
 +	else {
 +		/* FW BUG! */
 +		BNX2X_ERR("FW failed to respond!\n");
 +		bnx2x_fw_dump(bp);
 +		rc = 0;
 +	}
 +	mutex_unlock(&bp->fw_mb_mutex);
 +
 +	return rc;
 +}
 +
 +static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
 +{
 +#ifdef BCM_CNIC
 +	/* Statistics are not supported for CNIC Clients at the moment */
 +	if (IS_FCOE_FP(fp))
 +		return false;
 +#endif
 +	return true;
 +}
 +
 +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 +{
 +	if (CHIP_IS_E1x(bp)) {
 +		struct tstorm_eth_function_common_config tcfg = {0};
 +
 +		storm_memset_func_cfg(bp, &tcfg, p->func_id);
 +	}
 +
 +	/* Enable the function in the FW */
 +	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
 +	storm_memset_func_en(bp, p->func_id, 1);
 +
 +	/* spq */
 +	if (p->func_flgs & FUNC_FLG_SPQ) {
 +		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
 +		REG_WR(bp, XSEM_REG_FAST_MEMORY +
 +		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
 +	}
 +}
 +
 +/**
 + * bnx2x_get_common_flags - Return common flags
 + *
 + * @bp:		device handle
 + * @fp:		queue handle
 + * @zero_stats:	TRUE if statistics zeroing is needed
 + *
 + * Return the flags that are common for the Tx-only and not normal connections.
 + */
 +static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
 +						   struct bnx2x_fastpath *fp,
 +						   bool zero_stats)
 +{
 +	unsigned long flags = 0;
 +
 +	/* PF driver will always initialize the Queue to an ACTIVE state */
 +	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
 +
 +	/* tx only connections collect statistics (on the same index as the
 +	 *  parent connection). The statistics are zeroed when the parent
 +	 *  connection is initialized.
 +	 */
 +	if (stat_counter_valid(bp, fp)) {
 +		__set_bit(BNX2X_Q_FLG_STATS, &flags);
 +		if (zero_stats)
 +			__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
 +	}
 +
 +	return flags;
 +}
 +
 +static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
 +					      struct bnx2x_fastpath *fp,
 +					      bool leading)
 +{
 +	unsigned long flags = 0;
 +
 +	/* calculate other queue flags */
 +	if (IS_MF_SD(bp))
 +		__set_bit(BNX2X_Q_FLG_OV, &flags);
 +
 +	if (IS_FCOE_FP(fp))
 +		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
 +
 +	if (!fp->disable_tpa) {
 +		__set_bit(BNX2X_Q_FLG_TPA, &flags);
 +		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
 +	}
 +
 +	if (leading) {
 +		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
 +		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
 +	}
 +
 +	/* Always set HW VLAN stripping */
 +	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
 +
 +	return flags | bnx2x_get_common_flags(bp, fp, true);
 +}
 +
 +static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
 +	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
 +	u8 cos)
 +{
 +	gen_init->stat_id = bnx2x_stats_id(fp);
 +	gen_init->spcl_id = fp->cl_id;
 +
 +	/* Always use mini-jumbo MTU for FCoE L2 ring */
 +	if (IS_FCOE_FP(fp))
 +		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
 +	else
 +		gen_init->mtu = bp->dev->mtu;
 +
 +	gen_init->cos = cos;
 +}
 +
 +static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 +	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
 +	struct bnx2x_rxq_setup_params *rxq_init)
 +{
 +	u8 max_sge = 0;
 +	u16 sge_sz = 0;
 +	u16 tpa_agg_size = 0;
 +
 +	if (!fp->disable_tpa) {
 +		pause->sge_th_lo = SGE_TH_LO(bp);
 +		pause->sge_th_hi = SGE_TH_HI(bp);
 +
 +		/* validate SGE ring has enough entries to cross high threshold */
 +		WARN_ON(bp->dropless_fc &&
 +				pause->sge_th_hi + FW_PREFETCH_CNT >
 +				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
 +
 +		tpa_agg_size = min_t(u32,
 +			(min_t(u32, 8, MAX_SKB_FRAGS) *
 +			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
 +		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
 +			SGE_PAGE_SHIFT;
 +		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
 +			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
 +		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
 +				    0xffff);
 +	}
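 +	/* Illustration with assumed values (4K SGE pages, one page per
 +	 * SGE, MAX_SKB_FRAGS >= 8; the real values come from the driver
 +	 * headers): tpa_agg_size = min(8 * 4096, 0xffff) = 32768 and
 +	 * sge_sz = 4096.
 +	 */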
 +
 +	/* pause - not for e1 */
 +	if (!CHIP_IS_E1(bp)) {
 +		pause->bd_th_lo = BD_TH_LO(bp);
 +		pause->bd_th_hi = BD_TH_HI(bp);
 +
 +		pause->rcq_th_lo = RCQ_TH_LO(bp);
 +		pause->rcq_th_hi = RCQ_TH_HI(bp);
 +		/*
 +		 * validate that rings have enough entries to cross
 +		 * high thresholds
 +		 */
 +		WARN_ON(bp->dropless_fc &&
 +				pause->bd_th_hi + FW_PREFETCH_CNT >
 +				bp->rx_ring_size);
 +		WARN_ON(bp->dropless_fc &&
 +				pause->rcq_th_hi + FW_PREFETCH_CNT >
 +				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 +
 +		pause->pri_map = 1;
 +	}
 +
 +	/* rxq setup */
 +	rxq_init->dscr_map = fp->rx_desc_mapping;
 +	rxq_init->sge_map = fp->rx_sge_mapping;
 +	rxq_init->rcq_map = fp->rx_comp_mapping;
 +	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
 +
 +	/* This is the maximum number of data bytes that may be
 +	 * placed on the BD (not including padding).
 +	 */
 +	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
 +		IP_HEADER_ALIGNMENT_PADDING;
 +
 +	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 +	rxq_init->tpa_agg_sz = tpa_agg_size;
 +	rxq_init->sge_buf_sz = sge_sz;
 +	rxq_init->max_sges_pkt = max_sge;
 +	rxq_init->rss_engine_id = BP_FUNC(bp);
 +
 +	/* Maximum number of simultaneous TPA aggregations for this Queue.
 +	 *
 +	 * For PF Clients it should be the maximum available number.
 +	 * VF driver(s) may want to define it to a smaller value.
 +	 */
 +	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 +
 +	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
 +	rxq_init->fw_sb_id = fp->fw_sb_id;
 +
 +	if (IS_FCOE_FP(fp))
 +		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 +	else
 +		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
 +}
 +
 +static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
 +	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
 +	u8 cos)
 +{
 +	txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
 +	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
 +	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
 +	txq_init->fw_sb_id = fp->fw_sb_id;
 +
 +	/*
 +	 * set the tss leading client id for TX classification ==
 +	 * leading RSS client id
 +	 */
 +	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
 +
 +	if (IS_FCOE_FP(fp)) {
 +		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
 +		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
 +	}
 +}
 +
 +static void bnx2x_pf_init(struct bnx2x *bp)
 +{
 +	struct bnx2x_func_init_params func_init = {0};
 +	struct event_ring_data eq_data = { {0} };
 +	u16 flags;
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		/* reset IGU PF statistics: MSIX + ATTN */
 +		/* PF */
 +		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
 +			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
 +			   (CHIP_MODE_IS_4_PORT(bp) ?
 +				BP_FUNC(bp) : BP_VN(bp))*4, 0);
 +		/* ATTN */
 +		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
 +			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
 +			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
 +			   (CHIP_MODE_IS_4_PORT(bp) ?
 +				BP_FUNC(bp) : BP_VN(bp))*4, 0);
 +	}
 +
 +	/* function setup flags */
 +	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
 +
 +	/* This flag is relevant for E1x only.
 +	 * E2 doesn't have a TPA configuration at the function level.
 +	 */
 +	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
 +
 +	func_init.func_flgs = flags;
 +	func_init.pf_id = BP_FUNC(bp);
 +	func_init.func_id = BP_FUNC(bp);
 +	func_init.spq_map = bp->spq_mapping;
 +	func_init.spq_prod = bp->spq_prod_idx;
 +
 +	bnx2x_func_init(bp, &func_init);
 +
 +	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
 +
 +	/*
 +	 * Congestion management values depend on the link rate.
 +	 * There is no active link, so the initial link rate is set to 10 Gbps.
 +	 * When the link comes up, the congestion management values are
 +	 * re-calculated according to the actual link rate.
 +	 */
 +	bp->link_vars.line_speed = SPEED_10000;
 +	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
 +
 +	/* Only the PMF sets the HW */
 +	if (bp->port.pmf)
 +		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +
 +	/* init Event Queue */
 +	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
 +	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
 +	eq_data.producer = bp->eq_prod;
 +	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
 +	eq_data.sb_id = DEF_SB_ID;
 +	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
 +}
 +
 +static void bnx2x_e1h_disable(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +
 +	bnx2x_tx_disable(bp);
 +
 +	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 +}
 +
 +static void bnx2x_e1h_enable(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +
 +	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 +
 +	/* Tx queues only need to be re-enabled */
 +	netif_tx_wake_all_queues(bp->dev);
 +
 +	/*
 +	 * Should not call netif_carrier_on() since it will be called by the
 +	 * link state check anyway if the link is up
 +	 */
 +}
 +
 +/* called due to MCP event (on pmf):
 + *	reread new bandwidth configuration
 + *	configure FW
 + *	notify other functions about the change
 + */
 +static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
 +{
 +	if (bp->link_vars.link_up) {
 +		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
 +		bnx2x_link_sync_notify(bp);
 +	}
 +	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +}
 +
 +static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
 +{
 +	bnx2x_config_mf_bw(bp);
 +	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 +}
 +
 +static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 +{
 +	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
 +
 +	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
 +
 +		/*
 +		 * This is the only place besides the function initialization
 +		 * where the bp->flags can change, so it is done without any
 +		 * locks
 +		 */
 +		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
 +			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
 +			bp->flags |= MF_FUNC_DIS;
 +
 +			bnx2x_e1h_disable(bp);
 +		} else {
 +			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
 +			bp->flags &= ~MF_FUNC_DIS;
 +
 +			bnx2x_e1h_enable(bp);
 +		}
 +		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
 +	}
 +	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
 +		bnx2x_config_mf_bw(bp);
 +		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
 +	}
 +
 +	/* Report results to MCP */
 +	if (dcc_event)
 +		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
 +	else
 +		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 +}
 +
 +/* must be called under the spq lock */
 +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 +{
 +	struct eth_spe *next_spe = bp->spq_prod_bd;
 +
 +	if (bp->spq_prod_bd == bp->spq_last_bd) {
 +		bp->spq_prod_bd = bp->spq;
 +		bp->spq_prod_idx = 0;
 +		DP(NETIF_MSG_TIMER, "end of spq\n");
 +	} else {
 +		bp->spq_prod_bd++;
 +		bp->spq_prod_idx++;
 +	}
 +	return next_spe;
 +}
 +
 +/* must be called under the spq lock */
 +static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
 +{
 +	int func = BP_FUNC(bp);
 +
 +	/*
 +	 * Make sure that BD data is updated before writing the producer:
 +	 * BD data is written to the memory, the producer is read from the
 +	 * memory, thus we need a full memory barrier to ensure the ordering.
 +	 */
 +	mb();
 +
 +	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
 +		 bp->spq_prod_idx);
 +	mmiowb();
 +}
 +
 +/**
 + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 + *
 + * @cmd:	command to check
 + * @cmd_type:	command type
 + */
 +static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 +{
 +	if ((cmd_type == NONE_CONNECTION_TYPE) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
 +	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
 +		return true;
 +	else
 +		return false;
 +}
 +
 +/**
 + * bnx2x_sp_post - place a single command on an SP ring
 + *
 + * @bp:		driver handle
 + * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 + * @cid:	SW CID the command is related to
 + * @data_hi:	command private data address (high 32 bits)
 + * @data_lo:	command private data address (low 32 bits)
 + * @cmd_type:	command type (e.g. NONE, ETH)
 + *
 + * SP data is handled as if it's always an address pair, thus data fields are
 + * not swapped to little endian in upper functions. Instead this function swaps
 + * data as if it's two u32 fields.
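 + *
 + * A sketch of a typical call (hypothetical cid, addr and cmd_type
 + * values): split a 64-bit DMA address into halves and post, e.g.
 + *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, cid,
 + *		      U64_HI(addr), U64_LO(addr), cmd_type);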
 + */
 +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 +		  u32 data_hi, u32 data_lo, int cmd_type)
 +{
 +	struct eth_spe *spe;
 +	u16 type;
 +	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	if (unlikely(bp->panic))
 +		return -EIO;
 +#endif
 +
 +	spin_lock_bh(&bp->spq_lock);
 +
 +	if (common) {
 +		if (!atomic_read(&bp->eq_spq_left)) {
 +			BNX2X_ERR("BUG! EQ ring full!\n");
 +			spin_unlock_bh(&bp->spq_lock);
 +			bnx2x_panic();
 +			return -EBUSY;
 +		}
 +	} else if (!atomic_read(&bp->cq_spq_left)) {
 +			BNX2X_ERR("BUG! SPQ ring full!\n");
 +			spin_unlock_bh(&bp->spq_lock);
 +			bnx2x_panic();
 +			return -EBUSY;
 +	}
 +
 +	spe = bnx2x_sp_get_next(bp);
 +
 +	/* CID needs the port number to be encoded into it */
 +	spe->hdr.conn_and_cmd_data =
 +			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 +				    HW_CID(bp, cid));
 +
 +	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
 +
 +	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
 +		 SPE_HDR_FUNCTION_ID);
 +
 +	spe->hdr.type = cpu_to_le16(type);
 +
 +	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
 +	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 +
 +	/*
 +	 * It's ok if the actual decrement is issued towards the memory
 +	 * somewhere between the spin_lock and spin_unlock. Thus no
 +	 * more explicit memory barrier is needed.
 +	 */
 +	if (common)
 +		atomic_dec(&bp->eq_spq_left);
 +	else
 +		atomic_dec(&bp->cq_spq_left);
 +
 +	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 +	   "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) "
 +	   "type(0x%x) left (CQ, EQ) (%x,%x)\n",
 +	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 +	   (u32)(U64_LO(bp->spq_mapping) +
 +	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
 +	   HW_CID(bp, cid), data_hi, data_lo, type,
 +	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 +
 +	bnx2x_sp_prod_update(bp);
 +	spin_unlock_bh(&bp->spq_lock);
 +	return 0;
 +}
 +
 +/* acquire split MCP access lock register */
 +static int bnx2x_acquire_alr(struct bnx2x *bp)
 +{
 +	u32 j, val;
 +	int rc = 0;
 +
 +	might_sleep();
 +	for (j = 0; j < 1000; j++) {
 +		val = (1UL << 31);
 +		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
 +		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
 +		if (val & (1L << 31))
 +			break;
 +
 +		msleep(5);
 +	}
 +	if (!(val & (1L << 31))) {
 +		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 +		rc = -EBUSY;
 +	}
 +
 +	return rc;
 +}
 +
 +/* release split MCP access lock register */
 +static void bnx2x_release_alr(struct bnx2x *bp)
 +{
 +	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 +}
 +
 +#define BNX2X_DEF_SB_ATT_IDX	0x0001
 +#define BNX2X_DEF_SB_IDX	0x0002
 +
 +static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 +{
 +	struct host_sp_status_block *def_sb = bp->def_status_blk;
 +	u16 rc = 0;
 +
 +	barrier(); /* status block is written to by the chip */
 +	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 +		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 +		rc |= BNX2X_DEF_SB_ATT_IDX;
 +	}
 +
 +	if (bp->def_idx != def_sb->sp_sb.running_index) {
 +		bp->def_idx = def_sb->sp_sb.running_index;
 +		rc |= BNX2X_DEF_SB_IDX;
 +	}
 +
 +	/* Do not reorder: index reads must complete before handling */
 +	barrier();
 +	return rc;
 +}
 +
 +/*
 + * slow path service functions
 + */
 +
 +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 +{
 +	int port = BP_PORT(bp);
 +	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 +				       NIG_REG_MASK_INTERRUPT_PORT0;
 +	u32 aeu_mask;
 +	u32 nig_mask = 0;
 +	u32 reg_addr;
 +
 +	if (bp->attn_state & asserted)
 +		BNX2X_ERR("IGU ERROR\n");
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +	aeu_mask = REG_RD(bp, aeu_addr);
 +
 +	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
 +	   aeu_mask, asserted);
 +	aeu_mask &= ~(asserted & 0x3ff);
 +	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 +
 +	REG_WR(bp, aeu_addr, aeu_mask);
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +
 +	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 +	bp->attn_state |= asserted;
 +	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 +
 +	if (asserted & ATTN_HARD_WIRED_MASK) {
 +		if (asserted & ATTN_NIG_FOR_FUNC) {
 +
 +			bnx2x_acquire_phy_lock(bp);
 +
 +			/* save nig interrupt mask */
 +			nig_mask = REG_RD(bp, nig_int_mask_addr);
 +
 +			/* If nig_mask is not set, no need to call the update
 +			 * function.
 +			 */
 +			if (nig_mask) {
 +				REG_WR(bp, nig_int_mask_addr, 0);
 +
 +				bnx2x_link_attn(bp);
 +			}
 +
 +			/* handle unicore attn? */
 +		}
 +		if (asserted & ATTN_SW_TIMER_4_FUNC)
 +			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
 +
 +		if (asserted & GPIO_2_FUNC)
 +			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
 +
 +		if (asserted & GPIO_3_FUNC)
 +			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
 +
 +		if (asserted & GPIO_4_FUNC)
 +			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
 +
 +		if (port == 0) {
 +			if (asserted & ATTN_GENERAL_ATTN_1) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
 +			}
 +			if (asserted & ATTN_GENERAL_ATTN_2) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
 +			}
 +			if (asserted & ATTN_GENERAL_ATTN_3) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
 +			}
 +		} else {
 +			if (asserted & ATTN_GENERAL_ATTN_4) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
 +			}
 +			if (asserted & ATTN_GENERAL_ATTN_5) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
 +			}
 +			if (asserted & ATTN_GENERAL_ATTN_6) {
 +				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
 +				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
 +			}
 +		}
 +
 +	} /* if hardwired */
 +
 +	if (bp->common.int_block == INT_BLOCK_HC)
 +		reg_addr = (HC_REG_COMMAND_REG + port*32 +
 +			    COMMAND_REG_ATTN_BITS_SET);
 +	else
 +		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
 +
 +	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
 +	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
 +	REG_WR(bp, reg_addr, asserted);
 +
 +	/* now set back the mask */
 +	if (asserted & ATTN_NIG_FOR_FUNC) {
 +		REG_WR(bp, nig_int_mask_addr, nig_mask);
 +		bnx2x_release_phy_lock(bp);
 +	}
 +}
 +
 +static inline void bnx2x_fan_failure(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 ext_phy_config;
 +	/* mark the failure */
 +	ext_phy_config =
 +		SHMEM_RD(bp,
 +			 dev_info.port_hw_config[port].external_phy_config);
 +
 +	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
 +	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
 +	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
 +		 ext_phy_config);
 +
 +	/* log the failure */
 +	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
 +	       " the driver to shutdown the card to prevent permanent"
 +	       " damage.  Please contact OEM Support for assistance\n");
 +}
 +
 +static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 +{
 +	int port = BP_PORT(bp);
 +	int reg_offset;
 +	u32 val;
 +
 +	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 +
 +	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
 +
 +		val = REG_RD(bp, reg_offset);
 +		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
 +		REG_WR(bp, reg_offset, val);
 +
 +		BNX2X_ERR("SPIO5 hw attention\n");
 +
 +		/* Fan failure attention */
 +		bnx2x_hw_reset_phy(&bp->link_params);
 +		bnx2x_fan_failure(bp);
 +	}
 +
 +	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
 +		bnx2x_acquire_phy_lock(bp);
 +		bnx2x_handle_module_detect_int(&bp->link_params);
 +		bnx2x_release_phy_lock(bp);
 +	}
 +
 +	if (attn & HW_INTERRUT_ASSERT_SET_0) {
 +
 +		val = REG_RD(bp, reg_offset);
 +		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
 +		REG_WR(bp, reg_offset, val);
 +
 +		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
 +			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
 +		bnx2x_panic();
 +	}
 +}
 +
 +static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 +{
 +	u32 val;
 +
 +	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
 +
 +		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
 +		BNX2X_ERR("DB hw attention 0x%x\n", val);
 +		/* DORQ discard attention */
 +		if (val & 0x2)
 +			BNX2X_ERR("FATAL error from DORQ\n");
 +	}
 +
 +	if (attn & HW_INTERRUT_ASSERT_SET_1) {
 +
 +		int port = BP_PORT(bp);
 +		int reg_offset;
 +
 +		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
 +				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 +
 +		val = REG_RD(bp, reg_offset);
 +		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
 +		REG_WR(bp, reg_offset, val);
 +
 +		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
 +			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
 +		bnx2x_panic();
 +	}
 +}
 +
 +static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 +{
 +	u32 val;
 +
 +	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
 +
 +		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
 +		BNX2X_ERR("CFC hw attention 0x%x\n", val);
 +		/* CFC error attention */
 +		if (val & 0x2)
 +			BNX2X_ERR("FATAL error from CFC\n");
 +	}
 +
 +	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
 +		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
 +		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
 +		/* RQ_USDMDP_FIFO_OVERFLOW */
 +		if (val & 0x18000)
 +			BNX2X_ERR("FATAL error from PXP\n");
 +
 +		if (!CHIP_IS_E1x(bp)) {
 +			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
 +			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
 +		}
 +	}
 +
 +	if (attn & HW_INTERRUT_ASSERT_SET_2) {
 +
 +		int port = BP_PORT(bp);
 +		int reg_offset;
 +
 +		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
 +				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
 +
 +		val = REG_RD(bp, reg_offset);
 +		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
 +		REG_WR(bp, reg_offset, val);
 +
 +		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
 +			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
 +		bnx2x_panic();
 +	}
 +}
 +
 +static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 +{
 +	u32 val;
 +
 +	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
 +
 +		if (attn & BNX2X_PMF_LINK_ASSERT) {
 +			int func = BP_FUNC(bp);
 +
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
 +					func_mf_config[BP_ABS_FUNC(bp)].config);
 +			val = SHMEM_RD(bp,
 +				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
 +			if (val & DRV_STATUS_DCC_EVENT_MASK)
 +				bnx2x_dcc_event(bp,
 +					    (val & DRV_STATUS_DCC_EVENT_MASK));
 +
 +			if (val & DRV_STATUS_SET_MF_BW)
 +				bnx2x_set_mf_bw(bp);
 +
 +			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 +				bnx2x_pmf_update(bp);
 +
 +			if (bp->port.pmf &&
 +			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
 +				bp->dcbx_enabled > 0)
 +				/* start dcbx state machine */
 +				bnx2x_dcbx_set_params(bp,
 +					BNX2X_DCBX_STATE_NEG_RECEIVED);
 +			if (bp->link_vars.periodic_flags &
 +			    PERIODIC_FLAGS_LINK_EVENT) {
 +				/*  sync with link */
 +				bnx2x_acquire_phy_lock(bp);
 +				bp->link_vars.periodic_flags &=
 +					~PERIODIC_FLAGS_LINK_EVENT;
 +				bnx2x_release_phy_lock(bp);
 +				if (IS_MF(bp))
 +					bnx2x_link_sync_notify(bp);
 +				bnx2x_link_report(bp);
 +			}
 +			/* Always call it here: bnx2x_link_report() will
 +			 * prevent duplicate link indications.
 +			 */
 +			bnx2x__link_status_update(bp);
 +		} else if (attn & BNX2X_MC_ASSERT_BITS) {
 +
 +			BNX2X_ERR("MC assert!\n");
 +			bnx2x_mc_assert(bp);
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
 +			bnx2x_panic();
 +
 +		} else if (attn & BNX2X_MCP_ASSERT) {
 +
 +			BNX2X_ERR("MCP assert!\n");
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
 +			bnx2x_fw_dump(bp);
 +
 +		} else
 +			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
 +	}
 +
 +	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
 +		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
 +		if (attn & BNX2X_GRC_TIMEOUT) {
 +			val = CHIP_IS_E1(bp) ? 0 :
 +					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
 +			BNX2X_ERR("GRC time-out 0x%08x\n", val);
 +		}
 +		if (attn & BNX2X_GRC_RSV) {
 +			val = CHIP_IS_E1(bp) ? 0 :
 +					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
 +			BNX2X_ERR("GRC reserved 0x%08x\n", val);
 +		}
 +		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
 +	}
 +}
 +
 +/*
 + * Bits map:
 + * 0-7   - Engine0 load counter.
 + * 8-15  - Engine1 load counter.
 + * 16    - Engine0 RESET_IN_PROGRESS bit.
 + * 17    - Engine1 RESET_IN_PROGRESS bit.
 + * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
 + *         on the engine
 + * 19    - Engine1 ONE_IS_LOADED.
 + * 20    - Chip reset flow bit. When set, a non-leader must wait for both
 + *         engines' leaders to complete (check for both RESET_IN_PROGRESS
 + *         bits, not just the one belonging to its engine).
 + *
 + */
 +#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1
 +
 +#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
 +#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
 +#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
 +#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
 +#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
 +#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
 +#define BNX2X_GLOBAL_RESET_BIT		0x00040000
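 +
 +/* e.g. engine 1's load counter occupies bits 8-15, so its value is
 + * (val & BNX2X_PATH1_LOAD_CNT_MASK) >> BNX2X_PATH1_LOAD_CNT_SHIFT,
 + * exactly as the helpers below compute it.
 + */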
 +
 +/*
 + * Set the GLOBAL_RESET bit.
 + *
 + * Should be run under rtnl lock
 + */
 +void bnx2x_set_reset_global(struct bnx2x *bp)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
 +	barrier();
 +	mmiowb();
 +}
 +
 +/*
 + * Clear the GLOBAL_RESET bit.
 + *
 + * Should be run under rtnl lock
 + */
 +static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
 +	barrier();
 +	mmiowb();
 +}
 +
 +/*
 + * Checks the GLOBAL_RESET bit.
 + *
 + * should be run under rtnl lock
 + */
 +static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
 +	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
 +}
 +
 +/*
 + * Clear RESET_IN_PROGRESS bit for the current engine.
 + *
 + * Should be run under rtnl lock
 + */
 +static inline void bnx2x_set_reset_done(struct bnx2x *bp)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 bit = BP_PATH(bp) ?
 +		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +	/* Clear the bit */
 +	val &= ~bit;
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +	barrier();
 +	mmiowb();
 +}
 +
 +/*
 + * Set RESET_IN_PROGRESS for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 bit = BP_PATH(bp) ?
 +		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +	/* Set the bit */
 +	val |= bit;
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +	barrier();
 +	mmiowb();
 +}
 +
 +/*
 + * Checks the RESET_IN_PROGRESS bit for the given engine.
 + * should be run under rtnl lock
 + */
 +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 +{
 +	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 bit = engine ?
 +		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +	/* return false if bit is set */
 +	return (val & bit) ? false : true;
 +}
 +
 +/*
 + * Increment the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +void bnx2x_inc_load_cnt(struct bnx2x *bp)
 +{
 +	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +			     BNX2X_PATH0_LOAD_CNT_MASK;
 +	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +			     BNX2X_PATH0_LOAD_CNT_SHIFT;
 +
 +	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 +
 +	/* get the current counter value */
 +	val1 = (val & mask) >> shift;
 +
 +	/* increment... */
 +	val1++;
 +
 +	/* clear the old value */
 +	val &= ~mask;
 +
 +	/* set the new one */
 +	val |= ((val1 << shift) & mask);
 +
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +	barrier();
 +	mmiowb();
 +}
 +
 +/**
 + * bnx2x_dec_load_cnt - decrement the load counter
 + *
 + * @bp:		driver handle
 + *
 + * Should be run under rtnl lock.
 + * Decrements the load counter for the current engine. Returns
 + * the new counter value.
 + */
 +u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
 +{
 +	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +			     BNX2X_PATH0_LOAD_CNT_MASK;
 +	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +			     BNX2X_PATH0_LOAD_CNT_SHIFT;
 +
 +	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 +
 +	/* get the current counter value */
 +	val1 = (val & mask) >> shift;
 +
 +	/* decrement... */
 +	val1--;
 +
 +	/* clear the old value */
 +	val &= ~mask;
 +
 +	/* set the new one */
 +	val |= ((val1 << shift) & mask);
 +
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +	barrier();
 +	mmiowb();
 +
 +	return val1;
 +}
 +
 +/*
 + * Read the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
 +{
 +	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
 +			     BNX2X_PATH0_LOAD_CNT_MASK);
 +	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +			     BNX2X_PATH0_LOAD_CNT_SHIFT);
 +	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +	DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
 +
 +	val = (val & mask) >> shift;
 +
 +	DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
 +
 +	return val;
 +}
 +
 +/*
 + * Reset the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
 +{
 +	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +			     BNX2X_PATH0_LOAD_CNT_MASK);
 +
 +	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
 +}
 +
 +static inline void _print_next_block(int idx, const char *blk)
 +{
 +	pr_cont("%s%s", idx ? ", " : "", blk);
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
 +						  bool print)
 +{
 +	int i = 0;
 +	u32 cur_bit = 0;
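 +	/* Scan sig bit by bit from the LSB; each set bit names a parity
 +	 * block and is cleared below, so the loop ends once sig is zero.
 +	 */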
 +	for (i = 0; sig; i++) {
 +		cur_bit = ((u32)0x1 << i);
 +		if (sig & cur_bit) {
 +			switch (cur_bit) {
 +			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "BRB");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "PARSER");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "TSDM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "SEARCHER");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "TCM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "TSEMI");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "XPB");
 +				break;
 +			}
 +
 +			/* Clear the bit */
 +			sig &= ~cur_bit;
 +		}
 +	}
 +
 +	return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
 +						  bool *global, bool print)
 +{
 +	int i = 0;
 +	u32 cur_bit = 0;
 +	for (i = 0; sig; i++) {
 +		cur_bit = ((u32)0x1 << i);
 +		if (sig & cur_bit) {
 +			switch (cur_bit) {
 +			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "PBF");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "QM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "TM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "XSDM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "XCM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "XSEMI");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "DOORBELLQ");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "NIG");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "VAUX PCI CORE");
 +				*global = true;
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "DEBUG");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "USDM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "UCM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "USEMI");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "UPB");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "CSDM");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "CCM");
 +				break;
 +			}
 +
 +			/* Clear the bit */
 +			sig &= ~cur_bit;
 +		}
 +	}
 +
 +	return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
 +						  bool print)
 +{
 +	int i = 0;
 +	u32 cur_bit = 0;
 +	for (i = 0; sig; i++) {
 +		cur_bit = ((u32)0x1 << i);
 +		if (sig & cur_bit) {
 +			switch (cur_bit) {
 +			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "CSEMI");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "PXP");
 +				break;
 +			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++,
 +					"PXPPCICLOCKCLIENT");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "CFC");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "CDU");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "DMAE");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "IGU");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "MISC");
 +				break;
 +			}
 +
 +			/* Clear the bit */
 +			sig &= ~cur_bit;
 +		}
 +	}
 +
 +	return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
 +						  bool *global, bool print)
 +{
 +	int i = 0;
 +	u32 cur_bit = 0;
 +	for (i = 0; sig; i++) {
 +		cur_bit = ((u32)0x1 << i);
 +		if (sig & cur_bit) {
 +			switch (cur_bit) {
 +			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
 +				if (print)
 +					_print_next_block(par_num++, "MCP ROM");
 +				*global = true;
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "MCP UMP RX");
 +				*global = true;
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "MCP UMP TX");
 +				*global = true;
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
 +				if (print)
 +					_print_next_block(par_num++,
 +							  "MCP SCPAD");
 +				*global = true;
 +				break;
 +			}
 +
 +			/* Clear the bit */
 +			sig &= ~cur_bit;
 +		}
 +	}
 +
 +	return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
 +						  bool print)
 +{
 +	int i = 0;
 +	u32 cur_bit = 0;
 +	for (i = 0; sig; i++) {
 +		cur_bit = ((u32)0x1 << i);
 +		if (sig & cur_bit) {
 +			switch (cur_bit) {
 +			case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "PGLUE_B");
 +				break;
 +			case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
 +				if (print)
 +					_print_next_block(par_num++, "ATC");
 +				break;
 +			}
 +
 +			/* Clear the bit */
 +			sig &= ~cur_bit;
 +		}
 +	}
 +
 +	return par_num;
 +}
 +
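 +/*
 + * Walk the five AEU "after invert" signal groups and, when a parity
 + * attention is set, report which HW blocks latched it. Returns true
 + * if any parity error was found.
 + */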
 +static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
 +				     u32 *sig)
 +{
 +	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
 +	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
 +	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
 +	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
 +	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
 +		int par_num = 0;
 +		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
 +			"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
 +			"[4]:0x%08x\n",
 +			  sig[0] & HW_PRTY_ASSERT_SET_0,
 +			  sig[1] & HW_PRTY_ASSERT_SET_1,
 +			  sig[2] & HW_PRTY_ASSERT_SET_2,
 +			  sig[3] & HW_PRTY_ASSERT_SET_3,
 +			  sig[4] & HW_PRTY_ASSERT_SET_4);
 +		if (print)
 +			netdev_err(bp->dev,
 +				   "Parity errors detected in blocks: ");
 +		par_num = bnx2x_check_blocks_with_parity0(
 +			sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
 +		par_num = bnx2x_check_blocks_with_parity1(
 +			sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
 +		par_num = bnx2x_check_blocks_with_parity2(
 +			sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
 +		par_num = bnx2x_check_blocks_with_parity3(
 +			sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
 +		par_num = bnx2x_check_blocks_with_parity4(
 +			sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
 +
 +		if (print)
 +			pr_cont("\n");
 +
 +		return true;
 +	} else
 +		return false;
 +}
 +
 +/**
 + * bnx2x_chk_parity_attn - checks for parity attentions.
 + *
 + * @bp:		driver handle
 + * @global:	true if there was a global attention
 + * @print:	show parity attention in syslog
 + */
 +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
 +{
 +	struct attn_route attn = { {0} };
 +	int port = BP_PORT(bp);
 +
 +	attn.sig[0] = REG_RD(bp,
 +		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
 +			     port*4);
 +	attn.sig[1] = REG_RD(bp,
 +		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
 +			     port*4);
 +	attn.sig[2] = REG_RD(bp,
 +		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
 +			     port*4);
 +	attn.sig[3] = REG_RD(bp,
 +		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
 +			     port*4);
 +
 +	if (!CHIP_IS_E1x(bp))
 +		attn.sig[4] = REG_RD(bp,
 +			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
 +				     port*4);
 +
 +	return bnx2x_parity_attn(bp, global, print, attn.sig);
 +}
 +
 +
 +static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 +{
 +	u32 val;
 +	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
 +
 +		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
 +		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "ADDRESS_ERROR\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "INCORRECT_RCV_BEHAVIOR\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "WAS_ERROR_ATTN\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "VF_LENGTH_VIOLATION_ATTN\n");
 +		if (val &
 +		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
 +		if (val &
 +		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "TCPL_ERROR_ATTN\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "TCPL_IN_TWO_RCBS_ATTN\n");
 +		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
 +			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +				  "CSSNOOP_FIFO_OVERFLOW\n");
 +	}
 +	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
 +		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
 +		BNX2X_ERR("ATC hw attention 0x%x\n", val);
 +		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
 +		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG"
 +				  "_ATC_TCPL_TO_NOT_PEND\n");
 +		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +				  "ATC_GPA_MULTIPLE_HITS\n");
 +		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +				  "ATC_RCPL_TO_EMPTY_CNT\n");
 +		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
 +		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
 +			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +				  "ATC_IREQ_LESS_THAN_STU\n");
 +	}
 +
 +	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
 +		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
 +		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
 +		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
 +		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
 +	}
 +
 +}
 +
 +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 +{
 +	struct attn_route attn, *group_mask;
 +	int port = BP_PORT(bp);
 +	int index;
 +	u32 reg_addr;
 +	u32 val;
 +	u32 aeu_mask;
 +	bool global = false;
 +
 +	/* need to take HW lock because MCP or other port might also
 +	   try to handle this event */
 +	bnx2x_acquire_alr(bp);
 +
 +	if (bnx2x_chk_parity_attn(bp, &global, true)) {
 +#ifndef BNX2X_STOP_ON_ERROR
 +		bp->recovery_state = BNX2X_RECOVERY_INIT;
 +		schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +		/* Disable HW interrupts */
 +		bnx2x_int_disable(bp);
 +		/* In case of parity errors don't handle attentions so that
 +		 * other functions would "see" the parity errors.
 +		 */
 +#else
 +		bnx2x_panic();
 +#endif
 +		bnx2x_release_alr(bp);
 +		return;
 +	}
 +
 +	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 +	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
 +	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
 +	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
 +	if (!CHIP_IS_E1x(bp))
 +		attn.sig[4] =
 +		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
 +	else
 +		attn.sig[4] = 0;
 +
 +	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
 +	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
 +
 +	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 +		if (deasserted & (1 << index)) {
 +			group_mask = &bp->attn_group[index];
 +
 +			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
 +					 "%08x %08x %08x\n",
 +			   index,
 +			   group_mask->sig[0], group_mask->sig[1],
 +			   group_mask->sig[2], group_mask->sig[3],
 +			   group_mask->sig[4]);
 +
 +			bnx2x_attn_int_deasserted4(bp,
 +					attn.sig[4] & group_mask->sig[4]);
 +			bnx2x_attn_int_deasserted3(bp,
 +					attn.sig[3] & group_mask->sig[3]);
 +			bnx2x_attn_int_deasserted1(bp,
 +					attn.sig[1] & group_mask->sig[1]);
 +			bnx2x_attn_int_deasserted2(bp,
 +					attn.sig[2] & group_mask->sig[2]);
 +			bnx2x_attn_int_deasserted0(bp,
 +					attn.sig[0] & group_mask->sig[0]);
 +		}
 +	}
 +
 +	bnx2x_release_alr(bp);
 +
 +	if (bp->common.int_block == INT_BLOCK_HC)
 +		reg_addr = (HC_REG_COMMAND_REG + port*32 +
 +			    COMMAND_REG_ATTN_BITS_CLR);
 +	else
 +		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
 +
 +	val = ~deasserted;
 +	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
 +	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
 +	REG_WR(bp, reg_addr, val);
 +
 +	if (~bp->attn_state & deasserted)
 +		BNX2X_ERR("IGU ERROR\n");
 +
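 +	/* re-enable (unmask) the newly deasserted attention lines in the
 +	 * AEU so that future assertions on them are signalled again */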
 +	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +	aeu_mask = REG_RD(bp, reg_addr);
 +
 +	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
 +	   aeu_mask, deasserted);
 +	aeu_mask |= (deasserted & 0x3ff);
 +	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 +
 +	REG_WR(bp, reg_addr, aeu_mask);
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +
 +	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 +	bp->attn_state &= ~deasserted;
 +	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 +}
 +
 +static void bnx2x_attn_int(struct bnx2x *bp)
 +{
 +	/* read local copy of bits */
 +	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
 +								attn_bits);
 +	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
 +								attn_bits_ack);
 +	u32 attn_state = bp->attn_state;
 +
 +	/* look for changed bits */
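 +	/* a bit is newly asserted when raised in HW but not yet acked nor
 +	 * tracked in our state; it is deasserted when dropped in HW while
 +	 * still acked and tracked */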
 +	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
 +	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
 +
 +	DP(NETIF_MSG_HW,
 +	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
 +	   attn_bits, attn_ack, asserted, deasserted);
 +
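 +	/* attn_bits may disagree with our soft state only while an ack is
 +	 * still in flight (i.e. while attn_bits != attn_ack) */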
 +	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
 +		BNX2X_ERR("BAD attention state\n");
 +
 +	/* handle bits that were raised */
 +	if (asserted)
 +		bnx2x_attn_int_asserted(bp, asserted);
 +
 +	if (deasserted)
 +		bnx2x_attn_int_deasserted(bp, deasserted);
 +}
 +
 +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
 +		      u16 index, u8 op, u8 update)
 +{
 +	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
 +
 +	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
 +			     igu_addr);
 +}
 +
 +static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 +{
 +	/* No memory barriers */
 +	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
 +	mmiowb(); /* keep prod updates ordered */
 +}
 +
 +#ifdef BCM_CNIC
 +static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 +				      union event_ring_elem *elem)
 +{
 +	u8 err = elem->message.error;
 +
 +	if (!bp->cnic_eth_dev.starting_cid  ||
 +	    (cid < bp->cnic_eth_dev.starting_cid &&
 +	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
 +		return 1;
 +
 +	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
 +
 +	if (unlikely(err)) {
 +
 +		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
 +			  cid);
 +		bnx2x_panic_dump(bp);
 +	}
 +	bnx2x_cnic_cfc_comp(bp, cid, err);
 +	return 0;
 +}
 +#endif
 +
 +static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 +{
 +	struct bnx2x_mcast_ramrod_params rparam;
 +	int rc;
 +
 +	memset(&rparam, 0, sizeof(rparam));
 +
 +	rparam.mcast_obj = &bp->mcast_obj;
 +
 +	netif_addr_lock_bh(bp->dev);
 +
 +	/* Clear pending state for the last command */
 +	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
 +
 +	/* If there are pending mcast commands - send them */
 +	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
 +		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +		if (rc < 0)
 +			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
 +				  rc);
 +	}
 +
 +	netif_addr_unlock_bh(bp->dev);
 +}
 +
 +static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 +						   union event_ring_elem *elem)
 +{
 +	unsigned long ramrod_flags = 0;
 +	int rc = 0;
 +	u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
 +	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
 +
 +	/* Always push next commands out, don't wait here */
 +	__set_bit(RAMROD_CONT, &ramrod_flags);
 +
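 +	/* the upper bits of the echo field carry the type of the pending
 +	 * classification command; the lower bits carry the SW CID */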
 +	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
 +	case BNX2X_FILTER_MAC_PENDING:
 +#ifdef BCM_CNIC
 +		if (cid == BNX2X_ISCSI_ETH_CID)
 +			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
 +		else
 +#endif
 +			vlan_mac_obj = &bp->fp[cid].mac_obj;
 +
 +		break;
 +	case BNX2X_FILTER_MCAST_PENDING:
 +		/* This is only relevant for 57710 where multicast MACs are
 +		 * configured as unicast MACs using the same ramrod.
 +		 */
 +		bnx2x_handle_mcast_eqe(bp);
 +		return;
 +	default:
 +		BNX2X_ERR("Unsupported classification command: %d\n",
 +			  elem->message.data.eth_event.echo);
 +		return;
 +	}
 +
 +	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
 +
 +	if (rc < 0)
 +		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
 +	else if (rc > 0)
 +		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
 +
 +}
 +
 +#ifdef BCM_CNIC
 +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 +#endif
 +
 +static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 +{
 +	netif_addr_lock_bh(bp->dev);
 +
 +	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
 +
 +	/* Send rx_mode command again if was requested */
 +	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
 +		bnx2x_set_storm_rx_mode(bp);
 +#ifdef BCM_CNIC
 +	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
 +				    &bp->sp_state))
 +		bnx2x_set_iscsi_eth_rx_mode(bp, true);
 +	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
 +				    &bp->sp_state))
 +		bnx2x_set_iscsi_eth_rx_mode(bp, false);
 +#endif
 +
 +	netif_addr_unlock_bh(bp->dev);
 +}
 +
 +static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 +	struct bnx2x *bp, u32 cid)
 +{
 +	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
 +#ifdef BCM_CNIC
 +	if (cid == BNX2X_FCOE_ETH_CID)
 +		return &bnx2x_fcoe(bp, q_obj);
 +	else
 +#endif
 +		return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
 +}
 +
 +static void bnx2x_eq_int(struct bnx2x *bp)
 +{
 +	u16 hw_cons, sw_cons, sw_prod;
 +	union event_ring_elem *elem;
 +	u32 cid;
 +	u8 opcode;
 +	int spqe_cnt = 0;
 +	struct bnx2x_queue_sp_obj *q_obj;
 +	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
 +	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
 +
 +	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
 +
 +	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
 +	 * When we get the next-page we need to adjust so the loop
 +	 * condition below will be met. The next element is the size of a
 +	 * regular element and hence incrementing by 1 is sufficient.
 +	 */
 +	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
 +		hw_cons++;
 +
 +	/* This function may never run in parallel with itself for a
 +	 * specific bp, thus there is no need for a "paired" read memory
 +	 * barrier here.
 +	 */
 +	sw_cons = bp->eq_cons;
 +	sw_prod = bp->eq_prod;
 +
 +	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
 +			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 +
 +	for (; sw_cons != hw_cons;
 +	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
 +
 +		elem = &bp->eq_ring[EQ_DESC(sw_cons)];
 +
 +		cid = SW_CID(elem->message.data.cfc_del_event.cid);
 +		opcode = elem->message.opcode;
 +
 +		/* handle eq element */
 +		switch (opcode) {
 +		case EVENT_RING_OPCODE_STAT_QUERY:
 +			DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
 +			   bp->stats_comp++);
 +			/* nothing to do with stats comp */
 +			goto next_spqe;
 +
 +		case EVENT_RING_OPCODE_CFC_DEL:
 +			/* handle according to cid range */
 +			/*
 +			 * we may want to verify here that the bp state is
 +			 * HALTING
 +			 */
 +			DP(BNX2X_MSG_SP,
 +			   "got delete ramrod for MULTI[%d]\n", cid);
 +#ifdef BCM_CNIC
 +			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
 +				goto next_spqe;
 +#endif
 +			q_obj = bnx2x_cid_to_q_obj(bp, cid);
 +
 +			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
 +				break;
 +
 +			goto next_spqe;
 +
 +		case EVENT_RING_OPCODE_STOP_TRAFFIC:
 +			DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
 +			if (f_obj->complete_cmd(bp, f_obj,
 +						BNX2X_F_CMD_TX_STOP))
 +				break;
 +			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
 +			goto next_spqe;
 +
 +		case EVENT_RING_OPCODE_START_TRAFFIC:
 +			DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
 +			if (f_obj->complete_cmd(bp, f_obj,
 +						BNX2X_F_CMD_TX_START))
 +				break;
 +			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 +			goto next_spqe;
 +		case EVENT_RING_OPCODE_FUNCTION_START:
 +			DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
 +			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
 +				break;
 +
 +			goto next_spqe;
 +
 +		case EVENT_RING_OPCODE_FUNCTION_STOP:
 +			DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
 +			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
 +				break;
 +
 +			goto next_spqe;
 +		}
 +
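 +		/* the remaining opcodes are only legal in particular driver
 +		 * states, so dispatch on the opcode/state combination */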
 +		switch (opcode | bp->state) {
 +		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 +		      BNX2X_STATE_OPEN):
 +		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 +		      BNX2X_STATE_OPENING_WAIT4_PORT):
 +			cid = elem->message.data.eth_event.echo &
 +				BNX2X_SWCID_MASK;
 +			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
 +			   cid);
 +			rss_raw->clear_pending(rss_raw);
 +			break;
 +
 +		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 +		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
 +		case (EVENT_RING_OPCODE_SET_MAC |
 +		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 +		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +		      BNX2X_STATE_OPEN):
 +		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +		      BNX2X_STATE_DIAG):
 +		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 +			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
 +			bnx2x_handle_classification_eqe(bp, elem);
 +			break;
 +
 +		case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +		      BNX2X_STATE_OPEN):
 +		case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +		      BNX2X_STATE_DIAG):
 +		case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 +			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
 +			bnx2x_handle_mcast_eqe(bp);
 +			break;
 +
 +		case (EVENT_RING_OPCODE_FILTERS_RULES |
 +		      BNX2X_STATE_OPEN):
 +		case (EVENT_RING_OPCODE_FILTERS_RULES |
 +		      BNX2X_STATE_DIAG):
 +		case (EVENT_RING_OPCODE_FILTERS_RULES |
 +		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 +			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
 +			bnx2x_handle_rx_mode_eqe(bp);
 +			break;
 +		default:
 +			/* unknown event - log an error and continue */
 +			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
 +				  elem->message.opcode, bp->state);
 +		}
 +next_spqe:
 +		spqe_cnt++;
 +	} /* for */
 +
 +	smp_mb__before_atomic_inc();
 +	atomic_add(spqe_cnt, &bp->eq_spq_left);
 +
 +	bp->eq_cons = sw_cons;
 +	bp->eq_prod = sw_prod;
 +	/* Make sure that above mem writes were issued towards the memory */
 +	smp_wmb();
 +
 +	/* update producer */
 +	bnx2x_update_eq_prod(bp, bp->eq_prod);
 +}
 +
 +static void bnx2x_sp_task(struct work_struct *work)
 +{
 +	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 +	u16 status;
 +
 +	status = bnx2x_update_dsb_idx(bp);
 +/*	if (status == 0)				     */
 +/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 +
 +	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 +
 +	/* HW attentions */
 +	if (status & BNX2X_DEF_SB_ATT_IDX) {
 +		bnx2x_attn_int(bp);
 +		status &= ~BNX2X_DEF_SB_ATT_IDX;
 +	}
 +
 +	/* SP events: STAT_QUERY and others */
 +	if (status & BNX2X_DEF_SB_IDX) {
 +#ifdef BCM_CNIC
 +		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 +
 +		if ((!NO_FCOE(bp)) &&
 +			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +			/*
 +			 * Prevent local bottom-halves from running as
 +			 * we are going to change the local NAPI list.
 +			 */
 +			local_bh_disable();
 +			napi_schedule(&bnx2x_fcoe(bp, napi));
 +			local_bh_enable();
 +		}
 +#endif
 +		/* Handle EQ completions */
 +		bnx2x_eq_int(bp);
 +
 +		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
 +			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
 +
 +		status &= ~BNX2X_DEF_SB_IDX;
 +	}
 +
 +	if (unlikely(status))
 +		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 +		   status);
 +
 +	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 +	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 +}
 +
 +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 +{
 +	struct net_device *dev = dev_instance;
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
 +		     IGU_INT_DISABLE, 0);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	if (unlikely(bp->panic))
 +		return IRQ_HANDLED;
 +#endif
 +
 +#ifdef BCM_CNIC
 +	{
 +		struct cnic_ops *c_ops;
 +
 +		rcu_read_lock();
 +		c_ops = rcu_dereference(bp->cnic_ops);
 +		if (c_ops)
 +			c_ops->cnic_handler(bp->cnic_data, NULL);
 +		rcu_read_unlock();
 +	}
 +#endif
 +	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +
 +	return IRQ_HANDLED;
 +}
 +
 +/* end of slow path */
 +
 +
 +void bnx2x_drv_pulse(struct bnx2x *bp)
 +{
 +	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
 +		 bp->fw_drv_pulse_wr_seq);
 +}
 +
 +
 +static void bnx2x_timer(unsigned long data)
 +{
 +	u8 cos;
 +	struct bnx2x *bp = (struct bnx2x *) data;
 +
 +	if (!netif_running(bp->dev))
 +		return;
 +
 +	if (poll) {
 +		struct bnx2x_fastpath *fp = &bp->fp[0];
 +
 +		for_each_cos_in_tx_queue(fp, cos)
 +			bnx2x_tx_int(bp, &fp->txdata[cos]);
 +		bnx2x_rx_int(fp, 1000);
 +	}
 +
 +	if (!BP_NOMCP(bp)) {
 +		int mb_idx = BP_FW_MB_IDX(bp);
 +		u32 drv_pulse;
 +		u32 mcp_pulse;
 +
 +		++bp->fw_drv_pulse_wr_seq;
 +		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
 +		/* TBD - add SYSTEM_TIME */
 +		drv_pulse = bp->fw_drv_pulse_wr_seq;
 +		bnx2x_drv_pulse(bp);
 +
 +		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
 +			     MCP_PULSE_SEQ_MASK);
 +		/* The delta between driver pulse and mcp response
 +		 * should be 1 (before mcp response) or 0 (after mcp response)
 +		 */
 +		if ((drv_pulse != mcp_pulse) &&
 +		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
 +			/* someone lost a heartbeat... */
 +			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
 +				  drv_pulse, mcp_pulse);
 +		}
 +	}
 +
 +	if (bp->state == BNX2X_STATE_OPEN)
 +		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 +
 +	mod_timer(&bp->timer, jiffies + bp->current_interval);
 +}
 +
 +/* end of Statistics */
 +
 +/* nic init */
 +
 +/*
 + * nic init service functions
 + */
 +
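 +/* Fill 'len' bytes at 'addr' with the 'fill' pattern: dword writes when
 + * both the address and the length are dword-aligned, byte-wide writes
 + * otherwise.
 + */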
 +static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 +{
 +	u32 i;
 +	if (!(len%4) && !(addr%4))
 +		for (i = 0; i < len; i += 4)
 +			REG_WR(bp, addr + i, fill);
 +	else
 +		for (i = 0; i < len; i++)
 +			REG_WR8(bp, addr + i, fill);
 +
 +}
 +
 +/* helper: writes FP SP data to FW - data_size in dwords */
 +static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
 +				       int fw_sb_id,
 +				       u32 *sb_data_p,
 +				       u32 data_size)
 +{
 +	int index;
 +	for (index = 0; index < data_size; index++)
 +		REG_WR(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
 +			sizeof(u32)*index,
 +			*(sb_data_p + index));
 +}
 +
 +static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 +{
 +	u32 *sb_data_p;
 +	u32 data_size = 0;
 +	struct hc_status_block_data_e2 sb_data_e2;
 +	struct hc_status_block_data_e1x sb_data_e1x;
 +
 +	/* disable the function first */
 +	if (!CHIP_IS_E1x(bp)) {
 +		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
 +		sb_data_e2.common.state = SB_DISABLED;
 +		sb_data_e2.common.p_func.vf_valid = false;
 +		sb_data_p = (u32 *)&sb_data_e2;
 +		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
 +	} else {
 +		memset(&sb_data_e1x, 0,
 +		       sizeof(struct hc_status_block_data_e1x));
 +		sb_data_e1x.common.state = SB_DISABLED;
 +		sb_data_e1x.common.p_func.vf_valid = false;
 +		sb_data_p = (u32 *)&sb_data_e1x;
 +		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
 +	}
 +	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
 +
 +	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
 +			CSTORM_STATUS_BLOCK_SIZE);
 +	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
 +			CSTORM_SYNC_BLOCK_SIZE);
 +}
 +
 +/* helper:  writes SP SB data to FW */
 +static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
 +		struct hc_sp_status_block_data *sp_sb_data)
 +{
 +	int func = BP_FUNC(bp);
 +	int i;
 +	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
 +		REG_WR(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
 +			i*sizeof(u32),
 +			*((u32 *)sp_sb_data + i));
 +}
 +
 +static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
 +{
 +	int func = BP_FUNC(bp);
 +	struct hc_sp_status_block_data sp_sb_data;
 +	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
 +
 +	sp_sb_data.state = SB_DISABLED;
 +	sp_sb_data.p_func.vf_valid = false;
 +
 +	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
 +
 +	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
 +			CSTORM_SP_STATUS_BLOCK_SIZE);
 +	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
 +			CSTORM_SP_SYNC_BLOCK_SIZE);
 +
 +}
 +
 +
 +static inline
 +void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 +					   int igu_sb_id, int igu_seg_id)
 +{
 +	hc_sm->igu_sb_id = igu_sb_id;
 +	hc_sm->igu_seg_id = igu_seg_id;
 +	hc_sm->timer_value = 0xFF;
 +	hc_sm->time_to_expire = 0xFFFFFFFF;
 +}
 +
 +
 +/* allocates state machine ids. */
 +static inline
 +void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 +{
 +	/* zero out state machine indices */
 +	/* rx indices */
 +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
 +
 +	/* tx indices */
 +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
 +
 +	/* map indices */
 +	/* rx indices */
 +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
 +		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
 +
 +	/* tx indices */
 +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
 +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
 +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
 +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
 +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
 +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
 +}
 +
 +static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 +			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
 +{
 +	int igu_seg_id;
 +
 +	struct hc_status_block_data_e2 sb_data_e2;
 +	struct hc_status_block_data_e1x sb_data_e1x;
 +	struct hc_status_block_sm  *hc_sm_p;
 +	int data_size;
 +	u32 *sb_data_p;
 +
 +	if (CHIP_INT_MODE_IS_BC(bp))
 +		igu_seg_id = HC_SEG_ACCESS_NORM;
 +	else
 +		igu_seg_id = IGU_SEG_ACCESS_NORM;
 +
 +	bnx2x_zero_fp_sb(bp, fw_sb_id);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
 +		sb_data_e2.common.state = SB_ENABLED;
 +		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
 +		sb_data_e2.common.p_func.vf_id = vfid;
 +		sb_data_e2.common.p_func.vf_valid = vf_valid;
 +		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
 +		sb_data_e2.common.same_igu_sb_1b = true;
 +		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
 +		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
 +		hc_sm_p = sb_data_e2.common.state_machine;
 +		sb_data_p = (u32 *)&sb_data_e2;
 +		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
 +		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
 +	} else {
 +		memset(&sb_data_e1x, 0,
 +		       sizeof(struct hc_status_block_data_e1x));
 +		sb_data_e1x.common.state = SB_ENABLED;
 +		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
 +		sb_data_e1x.common.p_func.vf_id = 0xff;
 +		sb_data_e1x.common.p_func.vf_valid = false;
 +		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
 +		sb_data_e1x.common.same_igu_sb_1b = true;
 +		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
 +		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
 +		hc_sm_p = sb_data_e1x.common.state_machine;
 +		sb_data_p = (u32 *)&sb_data_e1x;
 +		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
 +		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
 +	}
 +
 +	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
 +				       igu_sb_id, igu_seg_id);
 +	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
 +				       igu_sb_id, igu_seg_id);
 +
 +	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
 +
 +	/* write indices to HW */
 +	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
 +}
 +
 +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
 +				     u16 tx_usec, u16 rx_usec)
 +{
 +	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
 +				    false, rx_usec);
 +	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
 +				       tx_usec);
 +	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
 +				       tx_usec);
 +	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
 +				       tx_usec);
 +}
 +
 +static void bnx2x_init_def_sb(struct bnx2x *bp)
 +{
 +	struct host_sp_status_block *def_sb = bp->def_status_blk;
 +	dma_addr_t mapping = bp->def_status_blk_mapping;
 +	int igu_sp_sb_index;
 +	int igu_seg_id;
 +	int port = BP_PORT(bp);
 +	int func = BP_FUNC(bp);
- 	int reg_offset;
++	int reg_offset, reg_offset_en5;
 +	u64 section;
 +	int index;
 +	struct hc_sp_status_block_data sp_sb_data;
 +	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
 +
 +	if (CHIP_INT_MODE_IS_BC(bp)) {
 +		igu_sp_sb_index = DEF_SB_IGU_ID;
 +		igu_seg_id = HC_SEG_ACCESS_DEF;
 +	} else {
 +		igu_sp_sb_index = bp->igu_dsb_id;
 +		igu_seg_id = IGU_SEG_ACCESS_DEF;
 +	}
 +
 +	/* ATTN */
 +	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
 +					    atten_status_block);
 +	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
 +
 +	bp->attn_state = 0;
 +
 +	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
++	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
++				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
 +	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 +		int sindex;
 +		/* take care of sig[0]..sig[4] */
 +		for (sindex = 0; sindex < 4; sindex++)
 +			bp->attn_group[index].sig[sindex] =
 +			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
 +
 +		if (!CHIP_IS_E1x(bp))
 +			/*
 +			 * enable5 is separate from the rest of the registers,
 +			 * and therefore the address skip is 4
 +			 * and not 16 between the different groups
 +			 */
 +			bp->attn_group[index].sig[4] = REG_RD(bp,
- 					reg_offset + 0x10 + 0x4*index);
++					reg_offset_en5 + 0x4*index);
 +		else
 +			bp->attn_group[index].sig[4] = 0;
 +	}
 +
 +	if (bp->common.int_block == INT_BLOCK_HC) {
 +		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 +				     HC_REG_ATTN_MSG0_ADDR_L);
 +
 +		REG_WR(bp, reg_offset, U64_LO(section));
 +		REG_WR(bp, reg_offset + 4, U64_HI(section));
 +	} else if (!CHIP_IS_E1x(bp)) {
 +		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
 +		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
 +	}
 +
 +	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
 +					    sp_sb);
 +
 +	bnx2x_zero_sp_sb(bp);
 +
 +	sp_sb_data.state		= SB_ENABLED;
 +	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
 +	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
 +	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
 +	sp_sb_data.igu_seg_id		= igu_seg_id;
 +	sp_sb_data.p_func.pf_id		= func;
 +	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
 +	sp_sb_data.p_func.vf_id		= 0xff;
 +
 +	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
 +
 +	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
 +}
 +
 +void bnx2x_update_coalesce(struct bnx2x *bp)
 +{
 +	int i;
 +
 +	for_each_eth_queue(bp, i)
 +		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
 +					 bp->tx_ticks, bp->rx_ticks);
 +}
 +
 +static void bnx2x_init_sp_ring(struct bnx2x *bp)
 +{
 +	spin_lock_init(&bp->spq_lock);
 +	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 +
 +	bp->spq_prod_idx = 0;
 +	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
 +	bp->spq_prod_bd = bp->spq;
 +	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
 +}
 +
 +static void bnx2x_init_eq_ring(struct bnx2x *bp)
 +{
 +	int i;
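 +	/* chain the EQ pages: the last element of each page points at the
 +	 * start of the following page, wrapping back to the first one */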
 +	for (i = 1; i <= NUM_EQ_PAGES; i++) {
 +		union event_ring_elem *elem =
 +			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
 +
 +		elem->next_page.addr.hi =
 +			cpu_to_le32(U64_HI(bp->eq_mapping +
 +				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
 +		elem->next_page.addr.lo =
 +			cpu_to_le32(U64_LO(bp->eq_mapping +
 +				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
 +	}
 +	bp->eq_cons = 0;
 +	bp->eq_prod = NUM_EQ_DESC;
 +	bp->eq_cons_sb = BNX2X_EQ_INDEX;
 +	/* we want a warning message before it gets rough... */
 +	atomic_set(&bp->eq_spq_left,
 +		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 +}
 +
 +
 +/* called with netif_addr_lock_bh() */
 +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 +			 unsigned long rx_mode_flags,
 +			 unsigned long rx_accept_flags,
 +			 unsigned long tx_accept_flags,
 +			 unsigned long ramrod_flags)
 +{
 +	struct bnx2x_rx_mode_ramrod_params ramrod_param;
 +	int rc;
 +
 +	memset(&ramrod_param, 0, sizeof(ramrod_param));
 +
 +	/* Prepare ramrod parameters */
 +	ramrod_param.cid = 0;
 +	ramrod_param.cl_id = cl_id;
 +	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
 +	ramrod_param.func_id = BP_FUNC(bp);
 +
 +	ramrod_param.pstate = &bp->sp_state;
 +	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
 +
 +	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
 +	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
 +
 +	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
 +
 +	ramrod_param.ramrod_flags = ramrod_flags;
 +	ramrod_param.rx_mode_flags = rx_mode_flags;
 +
 +	ramrod_param.rx_accept_flags = rx_accept_flags;
 +	ramrod_param.tx_accept_flags = tx_accept_flags;
 +
 +	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
 +	if (rc < 0) {
 +		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
 +		return;
 +	}
 +}
 +
 +/* called with netif_addr_lock_bh() */
 +void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 +{
 +	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 +	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 +
 +#ifdef BCM_CNIC
 +	if (!NO_FCOE(bp))
 +		/* Configure rx_mode of FCoE Queue */
 +		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
 +#endif
 +
 +	switch (bp->rx_mode) {
 +	case BNX2X_RX_MODE_NONE:
 +		/*
 +		 * 'drop all' supersedes any accept flags that may have been
 +		 * passed to the function.
 +		 */
 +		break;
 +	case BNX2X_RX_MODE_NORMAL:
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +		/* internal switching mode */
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +		break;
 +	case BNX2X_RX_MODE_ALLMULTI:
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +		/* internal switching mode */
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +		break;
 +	case BNX2X_RX_MODE_PROMISC:
 +		/* According to the definition of SI mode, an interface in
 +		 * promisc mode should receive matched and unmatched (in
 +		 * resolution of port) unicast packets.
 +		 */
 +		__set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +		/* internal switching mode */
 +		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +		if (IS_MF_SI(bp))
 +			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
 +		else
 +			__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +
 +		break;
 +	default:
 +		BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
 +		return;
 +	}
 +
 +	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
 +		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
 +	}
 +
 +	__set_bit(RAMROD_RX, &ramrod_flags);
 +	__set_bit(RAMROD_TX, &ramrod_flags);
 +
 +	bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
 +			    tx_accept_flags, ramrod_flags);
 +}
 +
 +static void bnx2x_init_internal_common(struct bnx2x *bp)
 +{
 +	int i;
 +
 +	if (IS_MF_SI(bp))
 +		/*
 +		 * In switch independent mode, the TSTORM needs to accept
 +		 * packets that failed classification, since approximate match
 +		 * mac addresses aren't written to NIG LLH
 +		 */
 +		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
 +	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
 +		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
 +
 +	/* Zero this manually as its initialization is
 +	   currently missing in the initTool */
 +	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
 +		REG_WR(bp, BAR_USTRORM_INTMEM +
 +		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
 +	if (!CHIP_IS_E1x(bp)) {
 +		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
 +			CHIP_INT_MODE_IS_BC(bp) ?
 +			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
 +	}
 +}
 +
 +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
 +{
 +	switch (load_code) {
 +	case FW_MSG_CODE_DRV_LOAD_COMMON:
 +	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
 +		bnx2x_init_internal_common(bp);
 +		/* no break */
 +
 +	case FW_MSG_CODE_DRV_LOAD_PORT:
 +		/* nothing to do */
 +		/* no break */
 +
 +	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 +		/* internal memory per function is
 +		   initialized inside bnx2x_pf_init */
 +		break;
 +
 +	default:
 +		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
 +		break;
 +	}
 +}
 +
 +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
 +{
 +	return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
 +}
 +
 +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
 +{
 +	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
 +}
 +
 +static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
 +{
 +	if (CHIP_IS_E1x(fp->bp))
 +		return BP_L_ID(fp->bp) + fp->index;
 +	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
 +		return bnx2x_fp_igu_sb_id(fp);
 +}
 +
 +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 +{
 +	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
 +	u8 cos;
 +	unsigned long q_type = 0;
 +	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
 +
 +	fp->cid = fp_idx;
 +	fp->cl_id = bnx2x_fp_cl_id(fp);
 +	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
 +	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
 +	/* qZone id equals the FW (per path) client id */
 +	fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
 +
 +	/* init shortcut */
 +	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
 +	/* Setup SB indices */
 +	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 +
 +	/* Configure Queue State object */
 +	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 +	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 +
 +	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
 +
 +	/* init tx data */
 +	for_each_cos_in_tx_queue(fp, cos) {
 +		bnx2x_init_txdata(bp, &fp->txdata[cos],
 +				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
 +				  FP_COS_TO_TXQ(fp, cos),
 +				  BNX2X_TX_SB_INDEX_BASE + cos);
 +		cids[cos] = fp->txdata[cos].cid;
 +	}
 +
 +	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
 +			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 +			     bnx2x_sp_mapping(bp, q_rdata), q_type);
 +
 +	/* Configure classification DBs: always enable Tx switching */
 +	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
 +
 +	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
 +				   "cl_id %d  fw_sb %d  igu_sb %d\n",
 +		   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 +		   fp->igu_sb_id);
 +	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
 +		      fp->fw_sb_id, fp->igu_sb_id);
 +
 +	bnx2x_update_fpsb_idx(fp);
 +}
 +
 +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 +{
 +	int i;
 +
 +	for_each_eth_queue(bp, i)
 +		bnx2x_init_eth_fp(bp, i);
 +#ifdef BCM_CNIC
 +	if (!NO_FCOE(bp))
 +		bnx2x_init_fcoe_fp(bp);
 +
 +	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
 +		      BNX2X_VF_ID_INVALID, false,
 +		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
 +
 +#endif
 +
 +	/* Initialize MOD_ABS interrupts */
 +	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 +			       bp->common.shmem_base, bp->common.shmem2_base,
 +			       BP_PORT(bp));
 +	/* ensure status block indices were read */
 +	rmb();
 +
 +	bnx2x_init_def_sb(bp);
 +	bnx2x_update_dsb_idx(bp);
 +	bnx2x_init_rx_rings(bp);
 +	bnx2x_init_tx_rings(bp);
 +	bnx2x_init_sp_ring(bp);
 +	bnx2x_init_eq_ring(bp);
 +	bnx2x_init_internal(bp, load_code);
 +	bnx2x_pf_init(bp);
 +	bnx2x_stats_init(bp);
 +
 +	/* flush all before enabling interrupts */
 +	mb();
 +	mmiowb();
 +
 +	bnx2x_int_enable(bp);
 +
 +	/* Check for SPIO5 */
 +	bnx2x_attn_int_deasserted0(bp,
 +		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
 +				   AEU_INPUTS_ATTN_BITS_SPIO5);
 +}
 +
 +/* end of nic init */
 +
 +/*
 + * gzip service functions
 + */
 +
 +static int bnx2x_gunzip_init(struct bnx2x *bp)
 +{
 +	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
 +					    &bp->gunzip_mapping, GFP_KERNEL);
 +	if (bp->gunzip_buf  == NULL)
 +		goto gunzip_nomem1;
 +
 +	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
 +	if (bp->strm  == NULL)
 +		goto gunzip_nomem2;
 +
 +	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
 +	if (bp->strm->workspace == NULL)
 +		goto gunzip_nomem3;
 +
 +	return 0;
 +
 +gunzip_nomem3:
 +	kfree(bp->strm);
 +	bp->strm = NULL;
 +
 +gunzip_nomem2:
 +	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
 +			  bp->gunzip_mapping);
 +	bp->gunzip_buf = NULL;
 +
 +gunzip_nomem1:
 +	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
 +	       " un-compression\n");
 +	return -ENOMEM;
 +}
 +
 +static void bnx2x_gunzip_end(struct bnx2x *bp)
 +{
 +	if (bp->strm) {
 +		vfree(bp->strm->workspace);
 +		kfree(bp->strm);
 +		bp->strm = NULL;
 +	}
 +
 +	if (bp->gunzip_buf) {
 +		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
 +				  bp->gunzip_mapping);
 +		bp->gunzip_buf = NULL;
 +	}
 +}
 +
 +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 +{
 +	int n, rc;
 +
 +	/* check gzip header */
 +	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
 +		BNX2X_ERR("Bad gzip header\n");
 +		return -EINVAL;
 +	}
 +
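 +	/* skip the 10-byte fixed gzip header; when the FNAME flag is set,
 +	 * the NUL-terminated original file name that follows is skipped
 +	 * as well */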
 +	n = 10;
 +
 +#define FNAME				0x8
 +
 +	if (zbuf[3] & FNAME)
 +		while ((zbuf[n++] != 0) && (n < len));
 +
 +	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
 +	bp->strm->avail_in = len - n;
 +	bp->strm->next_out = bp->gunzip_buf;
 +	bp->strm->avail_out = FW_BUF_SIZE;
 +
 +	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
 +	if (rc != Z_OK)
 +		return rc;
 +
 +	rc = zlib_inflate(bp->strm, Z_FINISH);
 +	if ((rc != Z_OK) && (rc != Z_STREAM_END))
 +		netdev_err(bp->dev, "Firmware decompression error: %s\n",
 +			   bp->strm->msg);
 +
 +	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
 +	if (bp->gunzip_outlen & 0x3)
 +		netdev_err(bp->dev, "Firmware decompression error:"
 +				    " gunzip_outlen (%d) not aligned\n",
 +				bp->gunzip_outlen);
 +	bp->gunzip_outlen >>= 2;
 +
 +	zlib_inflateEnd(bp->strm);
 +
 +	if (rc == Z_STREAM_END)
 +		return 0;
 +
 +	return rc;
 +}
 +
 +/* nic load/unload */
 +
 +/*
 + * General service functions
 + */
 +
 +/* send a NIG loopback debug packet */
 +static void bnx2x_lb_pckt(struct bnx2x *bp)
 +{
 +	u32 wb_write[3];
 +
 +	/* Ethernet source and destination addresses */
 +	wb_write[0] = 0x55555555;
 +	wb_write[1] = 0x55555555;
 +	wb_write[2] = 0x20;		/* SOP */
 +	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
 +
 +	/* NON-IP protocol */
 +	wb_write[0] = 0x09000000;
 +	wb_write[1] = 0x55555555;
 +	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
 +	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
 +}
 +
 +/* Some of the internal memories are not directly readable from the
 + * driver; to test them we send debug packets.
 + */
 +static int bnx2x_int_mem_test(struct bnx2x *bp)
 +{
 +	int factor;
 +	int count, i;
 +	u32 val = 0;
 +
 +	if (CHIP_REV_IS_FPGA(bp))
 +		factor = 120;
 +	else if (CHIP_REV_IS_EMUL(bp))
 +		factor = 200;
 +	else
 +		factor = 1;
 +
 +	/* Disable inputs of parser neighbor blocks */
 +	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 +	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 +	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
 +	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 +
 +	/*  Write 0 to parser credits for CFC search request */
 +	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 +
 +	/* send Ethernet packet */
 +	bnx2x_lb_pckt(bp);
 +
 +	/* TODO do I reset NIG statistics? */
 +	/* Wait until NIG register shows 1 packet of size 0x10 */
 +	count = 1000 * factor;
 +	while (count) {
 +
 +		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +		val = *bnx2x_sp(bp, wb_data[0]);
 +		if (val == 0x10)
 +			break;
 +
 +		msleep(10);
 +		count--;
 +	}
 +	if (val != 0x10) {
 +		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
 +		return -1;
 +	}
 +
 +	/* Wait until PRS register shows 1 packet */
 +	count = 1000 * factor;
 +	while (count) {
 +		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +		if (val == 1)
 +			break;
 +
 +		msleep(10);
 +		count--;
 +	}
 +	if (val != 0x1) {
 +		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
 +		return -2;
 +	}
 +
 +	/* Reset and init BRB, PRS */
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
 +	msleep(50);
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 +	msleep(50);
 +	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +
 +	DP(NETIF_MSG_HW, "part2\n");
 +
 +	/* Disable inputs of parser neighbor blocks */
 +	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 +	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 +	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
 +	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 +
 +	/* Write 0 to parser credits for CFC search request */
 +	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 +
 +	/* send 10 Ethernet packets */
 +	for (i = 0; i < 10; i++)
 +		bnx2x_lb_pckt(bp);
 +
 +	/* Wait until NIG register shows 10 + 1
 +	   packets of size 11*0x10 = 0xb0 */
 +	count = 1000 * factor;
 +	while (count) {
 +
 +		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +		val = *bnx2x_sp(bp, wb_data[0]);
 +		if (val == 0xb0)
 +			break;
 +
 +		msleep(10);
 +		count--;
 +	}
 +	if (val != 0xb0) {
 +		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
 +		return -3;
 +	}
 +
 +	/* Wait until PRS register shows 2 packets */
 +	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +	if (val != 2)
 +		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 +
 +	/* Write 1 to parser credits for CFC search request */
 +	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
 +
 +	/* Wait until PRS register shows 3 packets */
 +	msleep(10 * factor);
 +	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +	if (val != 3)
 +		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 +
 +	/* clear NIG EOP FIFO */
 +	for (i = 0; i < 11; i++)
 +		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
 +	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
 +	if (val != 1) {
 +		BNX2X_ERR("clear of NIG failed\n");
 +		return -4;
 +	}
 +
 +	/* Reset and init BRB, PRS, NIG */
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
 +	msleep(50);
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 +	msleep(50);
 +	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +#ifndef BCM_CNIC
 +	/* set NIC mode */
 +	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 +#endif
 +
 +	/* Enable inputs of parser neighbor blocks */
 +	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
 +	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
 +	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
 +	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
 +
 +	DP(NETIF_MSG_HW, "done\n");
 +
 +	return 0; /* OK */
 +}
 +
 +static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 +{
 +	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
 +	else
 +		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
 +	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
 +	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
 +	/*
 +	 * mask read length error interrupts in brb for parser
 +	 * (parsing unit and 'checksum and crc' unit)
 +	 * these errors are legal (PU reads fixed length and CAC can cause
 +	 * read length error on truncated packets)
 +	 */
 +	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
 +	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
 +	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
 +	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
 +	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
 +	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
 +/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
 +/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
 +	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
 +	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
 +	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
 +/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
 +/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
 +	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
 +	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
 +	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
 +	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
 +/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
 +/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
 +
 +	if (CHIP_REV_IS_FPGA(bp))
 +		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
 +	else if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
 +			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
 +				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
 +				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
 +				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
 +				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
 +	else
 +		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
 +	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
 +	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
 +	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
 +/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
 +
 +	if (!CHIP_IS_E1x(bp))
 +		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
 +		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
 +
 +	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 +	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 +/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
 +	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
 +}
 +
 +static void bnx2x_reset_common(struct bnx2x *bp)
 +{
 +	u32 val = 0x1400;
 +
 +	/* reset_common */
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +	       0xd3ffff7f);
 +
 +	if (CHIP_IS_E3(bp)) {
 +		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +	}
 +
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
 +}
 +
 +static void bnx2x_setup_dmae(struct bnx2x *bp)
 +{
 +	bp->dmae_ready = 0;
 +	spin_lock_init(&bp->dmae_lock);
 +}
 +
 +static void bnx2x_init_pxp(struct bnx2x *bp)
 +{
 +	u16 devctl;
 +	int r_order, w_order;
 +
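 +	/* derive the DMA write order from the PCIe Max_Payload_Size field,
 +	 * and the read order from Max_Read_Request_Size unless bp->mrrs
 +	 * overrides it */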
 +	pci_read_config_word(bp->pdev,
 +			     pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
 +	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
 +	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
 +	if (bp->mrrs == -1)
 +		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
 +	else {
 +		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
 +		r_order = bp->mrrs;
 +	}
 +
 +	bnx2x_init_pxp_arb(bp, r_order, w_order);
 +}
 +
 +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 +{
 +	int is_required;
 +	u32 val;
 +	int port;
 +
 +	if (BP_NOMCP(bp))
 +		return;
 +
 +	is_required = 0;
 +	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
 +	      SHARED_HW_CFG_FAN_FAILURE_MASK;
 +
 +	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
 +		is_required = 1;
 +
 +	/*
 +	 * The fan failure mechanism is usually related to the PHY type since
 +	 * the power consumption of the board is affected by the PHY. Currently,
 +	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
 +	 */
 +	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
 +		for (port = PORT_0; port < PORT_MAX; port++) {
 +			is_required |=
 +				bnx2x_fan_failure_det_req(
 +					bp,
 +					bp->common.shmem_base,
 +					bp->common.shmem2_base,
 +					port);
 +		}
 +
 +	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
 +
 +	if (is_required == 0)
 +		return;
 +
 +	/* Fan failure is indicated by SPIO 5 */
 +	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
 +		       MISC_REGISTERS_SPIO_INPUT_HI_Z);
 +
 +	/* set to active low mode */
 +	val = REG_RD(bp, MISC_REG_SPIO_INT);
 +	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
 +					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
 +	REG_WR(bp, MISC_REG_SPIO_INT, val);
 +
 +	/* enable interrupt to signal the IGU */
 +	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
 +	val |= (1 << MISC_REGISTERS_SPIO_5);
 +	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
 +}
 +
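 +/* Make the PCIe glue "pretend" that subsequent GRC accesses originate
 + * from 'pretend_func_num'; the read-back after the write flushes it
 + * before any dependent access is issued.
 + */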
 +static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
 +{
 +	u32 offset = 0;
 +
 +	if (CHIP_IS_E1(bp))
 +		return;
 +	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
 +		return;
 +
 +	switch (BP_ABS_FUNC(bp)) {
 +	case 0:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
 +		break;
 +	case 1:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
 +		break;
 +	case 2:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
 +		break;
 +	case 3:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
 +		break;
 +	case 4:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
 +		break;
 +	case 5:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
 +		break;
 +	case 6:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
 +		break;
 +	case 7:
 +		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
 +		break;
 +	default:
 +		return;
 +	}
 +
 +	REG_WR(bp, offset, pretend_func_num);
 +	REG_RD(bp, offset);
 +	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
 +}
 +
 +void bnx2x_pf_disable(struct bnx2x *bp)
 +{
 +	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +	val &= ~IGU_PF_CONF_FUNC_EN;
 +
 +	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 +	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 +}
 +
 +static inline void bnx2x__common_init_phy(struct bnx2x *bp)
 +{
 +	u32 shmem_base[2], shmem2_base[2];
 +	shmem_base[0] =  bp->common.shmem_base;
 +	shmem2_base[0] = bp->common.shmem2_base;
 +	if (!CHIP_IS_E1x(bp)) {
 +		shmem_base[1] =
 +			SHMEM2_RD(bp, other_shmem_base_addr);
 +		shmem2_base[1] =
 +			SHMEM2_RD(bp, other_shmem2_base_addr);
 +	}
 +	bnx2x_acquire_phy_lock(bp);
 +	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
 +			      bp->common.chip_id);
 +	bnx2x_release_phy_lock(bp);
 +}
 +
 +/**
 + * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 + *
 + * @bp:		driver handle
 + */
 +static int bnx2x_init_hw_common(struct bnx2x *bp)
 +{
 +	u32 val;
 +
 +	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
 +
 +	/*
 +	 * take the UNDI lock to protect undi_unload flow from accessing
 +	 * registers while we're resetting the chip
 +	 */
 +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +
 +	bnx2x_reset_common(bp);
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
 +
 +	val = 0xfffc;
 +	if (CHIP_IS_E3(bp)) {
 +		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +	}
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 +
 +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +
 +	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		u8 abs_func_id;
 +
 +		/*
 +		 * In 4-port or 2-port mode we need to turn off master-enable
 +		 * for everyone; after that, turn it back on for self. So we
 +		 * disregard multi-function or not, and always disable for all
 +		 * functions on the given path: this means 0,2,4,6 for path 0
 +		 * and 1,3,5,7 for path 1.
 +		 */
 +		for (abs_func_id = BP_PATH(bp);
 +		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
 +			if (abs_func_id == BP_ABS_FUNC(bp)) {
 +				REG_WR(bp,
 +				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
 +				    1);
 +				continue;
 +			}
 +
 +			bnx2x_pretend_func(bp, abs_func_id);
 +			/* clear pf enable */
 +			bnx2x_pf_disable(bp);
 +			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 +		}
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
 +	if (CHIP_IS_E1(bp)) {
 +		/* enable HW interrupt from PXP on USDM overflow
 +		   bit 16 on INT_MASK_0 */
 +		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
 +	bnx2x_init_pxp(bp);
 +
 +#ifdef __BIG_ENDIAN
 +	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
 +	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
 +	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
 +	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
 +	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
 +	/* make sure this value is 0 */
 +	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
 +
 +/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
 +	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
 +	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
 +	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
 +	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
 +#endif
 +
 +	bnx2x_ilt_init_page_size(bp, INITOP_SET);
 +
 +	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
 +		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
 +
 +	/* let the HW do its magic ... */
 +	msleep(100);
 +	/* finish PXP init */
 +	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
 +	if (val != 1) {
 +		BNX2X_ERR("PXP2 CFG failed\n");
 +		return -EBUSY;
 +	}
 +	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
 +	if (val != 1) {
 +		BNX2X_ERR("PXP2 RD_INIT failed\n");
 +		return -EBUSY;
 +	}
 +
 +	/* Timers bug workaround, E2 only. We need to set the entire ILT to
 +	 * have entries with value "0" and valid bit on.
 +	 * This needs to be done by the first PF that is loaded in a path
 +	 * (i.e. common phase)
 +	 */
 +	if (!CHIP_IS_E1x(bp)) {
 +/* In E2 there is a bug in the timers block that can cause function 6 / 7
 + * (i.e. vnic3) to start even if it is marked as "scan-off".
 + * This occurs when a different function (func2,3) is being marked
 + * as "scan-off". A real-life scenario, for example: a driver is being
 + * load-unloaded while func6,7 are down. This will cause the timer to access
 + * the ilt, translate to a logical address and send a request to read/write.
 + * Since the ilt for the function that is down is not valid, this will cause
 + * a translation error which is unrecoverable.
 + * The Workaround is intended to make sure that when this happens nothing fatal
 + * will occur. The workaround:
 + *	1.  First PF driver which loads on a path will:
 + *		a.  After taking the chip out of reset, by using pretend,
 + *		    it will write "0" to the following registers of
 + *		    the other vnics.
 + *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 + *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 + *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 + *		    And for itself it will write '1' to
 + *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 + *		    dmae-operations (writing to pram for example.)
 + *		    note: can be done for only function 6,7 but cleaner this
 + *			  way.
 + *		b.  Write zero+valid to the entire ILT.
 + *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 + *		    VNIC3 (of that port). The range allocated will be the
 + *		    entire ILT. This is needed to prevent  ILT range error.
 + *	2.  Any PF driver load flow:
 + *		a.  ILT update with the physical addresses of the allocated
 + *		    logical pages.
 + *		b.  Wait 20msec. - note that this timeout is needed to make
 + *		    sure there are no requests in one of the PXP internal
 + *		    queues with "old" ILT addresses.
 + *		c.  PF enable in the PGLC.
 + *		d.  Clear the was_error of the PF in the PGLC. (could have
 + *		    occured while driver was down)
 + *		e.  PF enable in the CFC (WEAK + STRONG)
 + *		f.  Timers scan enable
 + *	3.  PF driver unload flow:
 + *		a.  Clear the Timers scan_en.
 + *		b.  Polling for scan_on=0 for that PF.
 + *		c.  Clear the PF enable bit in the PXP.
 + *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
 + *		e.  Write zero+valid to all ILT entries (The valid bit must
 + *		    stay set)
 + *		f.  If this is VNIC 3 of a port then also init
 + *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
 + *		    to the last entry in the ILT.
 + *
 + *	Notes:
 + *	Currently the PF error in the PGLC is non-recoverable.
 + *	In the future there will be a recovery routine for this error.
 + *	Currently attention is masked.
 + *	Having an MCP lock on the load/unload process does not guarantee that
 + *	there is no Timer disable during Func6/7 enable. This is because the
 + *	Timers scan is currently being cleared by the MCP on FLR.
 + *	Step 2.d can be done only for PF6/7 and the driver can also check if
 + *	there is error before clearing it. But the flow above is simpler and
 + *	more general.
 + *	All ILT entries are written with zero+valid, and not just the
 + *	PF6/7 ILT entries, since in the future the ILT entry allocation
 + *	for PFs might be dynamic.
 + */
 +		struct ilt_client_info ilt_cli;
 +		struct bnx2x_ilt ilt;
 +		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 +		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 +
 +		/* initialize dummy TM client */
 +		ilt_cli.start = 0;
 +		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 +		ilt_cli.client_num = ILT_CLIENT_TM;
 +
 +		/* Step 1: set zeroes to all ilt page entries with valid bit on
 +		 * Step 2: set the timers first/last ilt entry to point
 +		 * to the entire range to prevent ILT range error for 3rd/4th
 +		 * vnic (this code assumes existence of the vnic)
 +		 *
 +		 * both steps performed by call to bnx2x_ilt_client_init_op()
 +		 * with dummy TM client
 +		 *
 +		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 +		 * and its sibling are split registers
 +		 */
 +		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
 +		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
 +		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 +
 +		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
 +		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
 +		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
 +	}
 +
 +
 +	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
 +	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
 +				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
 +		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
 +
 +		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
 +
 +		/* let the HW do its magic ... */
 +		do {
 +			msleep(200);
 +			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
 +		} while (factor-- && (val != 1));
 +
 +		if (val != 1) {
 +			BNX2X_ERR("ATC_INIT failed\n");
 +			return -EBUSY;
 +		}
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
 +
 +	/* clean the DMAE memory */
 +	bp->dmae_ready = 1;
 +	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
 +
 +	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
 +
 +	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
 +
 +	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
 +
 +	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
 +
 +	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
 +	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
 +	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
 +	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 +
 +	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
 +
 +
 +	/* QM queues pointers table */
 +	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
 +
 +	/* soft reset pulse */
 +	REG_WR(bp, QM_REG_SOFT_RESET, 1);
 +	REG_WR(bp, QM_REG_SOFT_RESET, 0);
 +
 +#ifdef BCM_CNIC
 +	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 +#endif
 +
 +	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
 +	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
 +	if (!CHIP_REV_IS_SLOW(bp))
 +		/* enable hw interrupt from doorbell Q */
 +		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
 +
 +	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +
 +	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
 +
 +	if (!CHIP_IS_E1(bp))
 +		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 +
 +	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
 +		/* Bit-map indicating which L2 hdrs may appear
 +		 * after the basic Ethernet header
 +		 */
 +		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
 +		       bp->path_has_ovlan ? 7 : 6);
 +
 +	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		/* reset VFC memories */
 +		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
 +			   VFC_MEMORIES_RST_REG_CAM_RST |
 +			   VFC_MEMORIES_RST_REG_RAM_RST);
 +		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
 +			   VFC_MEMORIES_RST_REG_CAM_RST |
 +			   VFC_MEMORIES_RST_REG_RAM_RST);
 +
 +		msleep(20);
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
 +
 +	/* sync semi rtc */
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +	       0x80000000);
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
 +	       0x80000000);
 +
 +	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 +
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
 +		       bp->path_has_ovlan ? 7 : 6);
 +
 +	REG_WR(bp, SRC_REG_SOFT_RST, 1);
 +
 +	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
 +
 +#ifdef BCM_CNIC
 +	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
 +	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
 +#endif
 +	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 +
 +	if (sizeof(union cdu_context) != 1024)
 +		/* we currently assume that a context is 1024 bytes */
 +		dev_alert(&bp->pdev->dev, "please adjust the size "
 +					  "of cdu_context(%ld)\n",
 +			 (long)sizeof(union cdu_context));
 +
 +	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
 +	val = (4 << 24) + (0 << 12) + 1024;
 +	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
 +
 +	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
 +	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
 +	/* enable context validation interrupt from CFC */
 +	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
 +
 +	/* set the thresholds to prevent CFC/CDU race */
 +	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
 +
 +	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
 +
 +	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
 +		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
 +
 +	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
 +	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
 +
 +	/* Reset PCIE errors for debug */
 +	REG_WR(bp, 0x2814, 0xffffffff);
 +	REG_WR(bp, 0x3820, 0xffffffff);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
 +			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
 +				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
 +		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
 +			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
 +				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
 +				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
 +		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
 +			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
 +				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
 +				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
 +	if (!CHIP_IS_E1(bp)) {
 +		/* in E3 this is done in the per-port section */
 +		if (!CHIP_IS_E3(bp))
 +			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
 +	}
 +	if (CHIP_IS_E1H(bp))
 +		/* not applicable for E2 (and above ...) */
 +		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
 +
 +	if (CHIP_REV_IS_SLOW(bp))
 +		msleep(200);
 +
 +	/* finish CFC init */
 +	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
 +	if (val != 1) {
 +		BNX2X_ERR("CFC LL_INIT failed\n");
 +		return -EBUSY;
 +	}
 +	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
 +	if (val != 1) {
 +		BNX2X_ERR("CFC AC_INIT failed\n");
 +		return -EBUSY;
 +	}
 +	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
 +	if (val != 1) {
 +		BNX2X_ERR("CFC CAM_INIT failed\n");
 +		return -EBUSY;
 +	}
 +	REG_WR(bp, CFC_REG_DEBUG0, 0);
 +
 +	if (CHIP_IS_E1(bp)) {
 +		/* read NIG statistic to see if this is the first time
 +		 * we are up since power-up
 +		 */
 +		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +		val = *bnx2x_sp(bp, wb_data[0]);
 +
 +		/* do internal memory self test */
 +		if ((val == 0) && bnx2x_int_mem_test(bp)) {
 +			BNX2X_ERR("internal mem self test failed\n");
 +			return -EBUSY;
 +		}
 +	}
 +
 +	bnx2x_setup_fan_failure_detection(bp);
 +
 +	/* clear PXP2 attentions */
 +	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 +
 +	bnx2x_enable_blocks_attention(bp);
 +	bnx2x_enable_blocks_parity(bp);
 +
 +	if (!BP_NOMCP(bp)) {
 +		if (CHIP_IS_E1x(bp))
 +			bnx2x__common_init_phy(bp);
 +	} else
 +		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 +
 +	return 0;
 +}
 +
 +/**
 + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 + *
 + * @bp:		driver handle
 + */
 +static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
 +{
 +	int rc = bnx2x_init_hw_common(bp);
 +
 +	if (rc)
 +		return rc;
 +
 +	/* In E2 2-PORT mode, same ext phy is used for the two paths */
 +	if (!BP_NOMCP(bp))
 +		bnx2x__common_init_phy(bp);
 +
 +	return 0;
 +}
 +
 +static int bnx2x_init_hw_port(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
 +	u32 low, high;
 +	u32 val;
 +
 +	bnx2x__link_reset(bp);
 +
 +	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
 +
 +	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 +
 +	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
 +
 +	/* Timers bug workaround: the workaround disables the pf_master bit
 +	 * in pglue at the common phase, so we need to enable it here before
 +	 * any dmae accesses are attempted. Therefore we manually added the
 +	 * enable-master to the port phase (it also happens in the function
 +	 * phase)
 +	 */
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +
 +	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 +	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 +	bnx2x_init_block(bp, BLOCK_QM, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
 +
 +	/* QM cid (connection) count */
 +	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
 +
 +#ifdef BCM_CNIC
 +	bnx2x_init_block(bp, BLOCK_TM, init_phase);
 +	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
 +	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 +#endif
 +
 +	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 +
 +	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
 +		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 +
 +		if (IS_MF(bp))
 +			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
 +		else if (bp->dev->mtu > 4096) {
 +			if (bp->flags & ONE_PORT_FLAG)
 +				low = 160;
 +			else {
 +				val = bp->dev->mtu;
 +				/* (24*1024 + val*4)/256 */
 +				low = 96 + (val/64) +
 +						((val % 64) ? 1 : 0);
 +			}
 +		} else
 +			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
 +		high = low + 56;	/* 14*1024/256 */
 +		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
 +		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
 +	}
 +
 +	if (CHIP_MODE_IS_4_PORT(bp))
 +		REG_WR(bp, (BP_PORT(bp) ?
 +			    BRB1_REG_MAC_GUARANTIED_1 :
 +			    BRB1_REG_MAC_GUARANTIED_0), 40);
 +
 +
 +	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
 +	if (CHIP_IS_E3B0(bp))
 +		/* Ovlan exists only if we are in multi-function +
 +		 * switch-dependent mode; in switch-independent mode
 +		 * there are no ovlan headers
 +		 */
 +		REG_WR(bp, BP_PORT(bp) ?
 +		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
 +		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
 +		       (bp->path_has_ovlan ? 7 : 6));
 +
 +	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
 +
 +	if (CHIP_IS_E1x(bp)) {
 +		/* configure PBF to work without PAUSE, MTU 9000 */
 +		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 +
 +		/* update threshold */
 +		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
 +		/* update init credit */
 +		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
 +
 +		/* probe changes */
 +		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
 +		udelay(50);
 +		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 +	}
 +
 +#ifdef BCM_CNIC
 +	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
 +#endif
 +	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 +
 +	if (CHIP_IS_E1(bp)) {
 +		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +	}
 +	bnx2x_init_block(bp, BLOCK_HC, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 +	/* init aeu_mask_attn_func_0/1:
 +	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
 +	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
 +	 *             bits 4-7 are used for "per vn group attention" */
 +	val = IS_MF(bp) ? 0xF7 : 0x7;
 +	/* Enable DCBX attention for all but E1 */
 +	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
 +	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 +
 +	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		/* Bit-map indicating which L2 hdrs may appear after the
 +		 * basic Ethernet header
 +		 */
 +		REG_WR(bp, BP_PORT(bp) ?
 +			   NIG_REG_P1_HDRS_AFTER_BASIC :
 +			   NIG_REG_P0_HDRS_AFTER_BASIC,
 +			   IS_MF_SD(bp) ? 7 : 6);
 +
 +		if (CHIP_IS_E3(bp))
 +			REG_WR(bp, BP_PORT(bp) ?
 +				   NIG_REG_LLH1_MF_MODE :
 +				   NIG_REG_LLH_MF_MODE, IS_MF(bp));
 +	}
 +	if (!CHIP_IS_E3(bp))
 +		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 +
 +	if (!CHIP_IS_E1(bp)) {
 +		/* 0x2 disable mf_ov, 0x1 enable */
 +		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
 +		       (IS_MF_SD(bp) ? 0x1 : 0x2));
 +
 +		if (!CHIP_IS_E1x(bp)) {
 +			val = 0;
 +			switch (bp->mf_mode) {
 +			case MULTI_FUNCTION_SD:
 +				val = 1;
 +				break;
 +			case MULTI_FUNCTION_SI:
 +				val = 2;
 +				break;
 +			}
 +
 +			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
 +						  NIG_REG_LLH0_CLS_TYPE), val);
 +		}
 +		{
 +			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
 +			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
 +			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
 +		}
 +	}
 +
 +
 +	/* If SPIO5 is set to generate interrupts, enable it for this port */
 +	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
 +	if (val & (1 << MISC_REGISTERS_SPIO_5)) {
 +		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 +		val = REG_RD(bp, reg_addr);
 +		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
 +		REG_WR(bp, reg_addr, val);
 +	}
 +
 +	return 0;
 +}
 +
 +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 +{
 +	int reg;
 +
 +	if (CHIP_IS_E1(bp))
 +		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
 +	else
 +		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 +
 +	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
 +}
 +
 +static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 +{
 +	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 +}
 +
 +static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
 +{
 +	u32 i, base = FUNC_ILT_BASE(func);
 +	for (i = base; i < base + ILT_PER_FUNC; i++)
 +		bnx2x_ilt_wr(bp, i, 0);
 +}
 +
 +static int bnx2x_init_hw_func(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	int func = BP_FUNC(bp);
 +	int init_phase = PHASE_PF0 + func;
 +	struct bnx2x_ilt *ilt = BP_ILT(bp);
 +	u16 cdu_ilt_start;
 +	u32 addr, val;
 +	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
 +	int i, main_mem_width;
 +
 +	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 +
 +	/* FLR cleanup - hmmm */
 +	if (!CHIP_IS_E1x(bp))
 +		bnx2x_pf_flr_clnup(bp);
 +
 +	/* set MSI reconfigure capability */
 +	if (bp->common.int_block == INT_BLOCK_HC) {
 +		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
 +		val = REG_RD(bp, addr);
 +		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
 +		REG_WR(bp, addr, val);
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
 +
 +	ilt = BP_ILT(bp);
 +	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
 +
 +	for (i = 0; i < L2_ILT_LINES(bp); i++) {
 +		ilt->lines[cdu_ilt_start + i].page =
 +			bp->context.vcxt + (ILT_PAGE_CIDS * i);
 +		ilt->lines[cdu_ilt_start + i].page_mapping =
 +			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
 +		/* cdu ilt pages are allocated manually so there's no need
 +		 * to set the size
 +		 */
 +	}
 +	bnx2x_ilt_init_op(bp, INITOP_SET);
 +
 +#ifdef BCM_CNIC
 +	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
 +
 +	/* The T1 hash bits value determines the number of T1 entries */
 +	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
 +#endif
 +
 +#ifndef BCM_CNIC
 +	/* set NIC mode */
 +	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 +#endif  /* BCM_CNIC */
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
 +
 +		/* Turn on a single ISR mode in IGU if driver is going to use
 +		 * INT#x or MSI
 +		 */
 +		if (!(bp->flags & USING_MSIX_FLAG))
 +			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
 +		/*
 +		 * Timers bug workaround: function init part.
 +		 * We need to wait 20 msec after initializing ILT to make
 +		 * sure there are no requests in one of the PXP internal
 +		 * queues with "old" ILT addresses
 +		 */
 +		msleep(20);
 +		/*
 +		 * Master enable - needed because WB DMAE writes are
 +		 * performed before this register is re-initialized as part
 +		 * of the regular function init
 +		 */
 +		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +		/* Enable the function in IGU */
 +		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
 +	}
 +
 +	bp->dmae_ready = 1;
 +
 +	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 +
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
 +
 +	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 +	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
 +	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 +	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
 +	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
 +	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
 +
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, QM_REG_PF_EN, 1);
 +
 +	if (!CHIP_IS_E1x(bp)) {
 +		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +	}
 +	bnx2x_init_block(bp, BLOCK_QM, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_TM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 +	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
 +	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
 +	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
 +	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
 +	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, PBF_REG_DISABLE_PF, 0);
 +
 +	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 +
 +	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 +
 +	if (!CHIP_IS_E1x(bp))
 +		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
 +
 +	if (IS_MF(bp)) {
 +		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 +		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
 +	}
 +
 +	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 +
 +	/* HC init per function */
 +	if (bp->common.int_block == INT_BLOCK_HC) {
 +		if (CHIP_IS_E1H(bp)) {
 +			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +
 +			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +		}
 +		bnx2x_init_block(bp, BLOCK_HC, init_phase);
 +
 +	} else {
 +		int num_segs, sb_idx, prod_offset;
 +
 +		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +
 +		if (!CHIP_IS_E1x(bp)) {
 +			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
 +			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 +		}
 +
 +		bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 +
 +		if (!CHIP_IS_E1x(bp)) {
 +			int dsb_idx = 0;
 +			/*
 +			 * Producer memory:
 +			 * E2 mode: addresses 0-135 map to the mapping memory;
 +			 * 136 - PF0 default prod; 137 - PF1 default prod;
 +			 * 138 - PF2 default prod; 139 - PF3 default prod;
 +			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
 +			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
 +			 * 144-147 reserved.
 +			 *
 +			 * E1.5 mode - in backward compatible mode:
 +			 * for a non-default SB, each even line in the memory
 +			 * holds the U producer and each odd line holds
 +			 * the C producer. The first 128 producers are for
 +			 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last
 +			 * 20 producers are for the DSBs of the PFs.
 +			 * Each PF has five segments (the order inside each
 +			 * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
 +			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
 +			 * 144-147 attn prods.
 +			 */
 +			/* non-default-status-blocks */
 +			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
 +				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
 +			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
 +				prod_offset = (bp->igu_base_sb + sb_idx) *
 +					num_segs;
 +
 +				for (i = 0; i < num_segs; i++) {
 +					addr = IGU_REG_PROD_CONS_MEMORY +
 +							(prod_offset + i) * 4;
 +					REG_WR(bp, addr, 0);
 +				}
 +				/* send consumer update with value 0 */
 +				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
 +					     USTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_igu_clear_sb(bp,
 +						   bp->igu_base_sb + sb_idx);
 +			}
 +
 +			/* default-status-blocks */
 +			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
 +				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
 +
 +			if (CHIP_MODE_IS_4_PORT(bp))
 +				dsb_idx = BP_FUNC(bp);
 +			else
 +				dsb_idx = BP_VN(bp);
 +
 +			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
 +				       IGU_BC_BASE_DSB_PROD + dsb_idx :
 +				       IGU_NORM_BASE_DSB_PROD + dsb_idx);
 +
 +			/*
 +			 * igu prods come in chunks of E1HVN_MAX (4),
 +			 * regardless of the current chip mode
 +			 */
 +			for (i = 0; i < (num_segs * E1HVN_MAX);
 +			     i += E1HVN_MAX) {
 +				addr = IGU_REG_PROD_CONS_MEMORY +
 +							(prod_offset + i)*4;
 +				REG_WR(bp, addr, 0);
 +			}
 +			/* send consumer update with 0 */
 +			if (CHIP_INT_MODE_IS_BC(bp)) {
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     USTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     CSTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     XSTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     TSTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
 +			} else {
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     USTORM_ID, 0, IGU_INT_NOP, 1);
 +				bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
 +			}
 +			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
 +
 +			/* !!! these should become driver const once
 +			   rf-tool supports split-68 const */
 +			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 +			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 +			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 +			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 +			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 +			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 +		}
 +	}
 +
 +	/* Reset PCIE errors for debug */
 +	REG_WR(bp, 0x2114, 0xffffffff);
 +	REG_WR(bp, 0x2120, 0xffffffff);
 +
 +	if (CHIP_IS_E1x(bp)) {
 +		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
 +		main_mem_base = HC_REG_MAIN_MEMORY +
 +				BP_PORT(bp) * (main_mem_size * 4);
 +		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
 +		main_mem_width = 8;
 +
 +		val = REG_RD(bp, main_mem_prty_clr);
 +		if (val)
 +			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
 +					  "block during "
 +					  "function init (0x%x)!\n", val);
 +
 +		/* Clear "false" parity errors in MSI-X table */
 +		for (i = main_mem_base;
 +		     i < main_mem_base + main_mem_size * 4;
 +		     i += main_mem_width) {
 +			bnx2x_read_dmae(bp, i, main_mem_width / 4);
 +			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 +					 i, main_mem_width / 4);
 +		}
 +		/* Clear HC parity attention */
 +		REG_RD(bp, main_mem_prty_clr);
 +	}
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	/* Enable STORMs SP logging */
 +	REG_WR8(bp, BAR_USTRORM_INTMEM +
 +	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +	REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +	REG_WR8(bp, BAR_XSTRORM_INTMEM +
 +	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +#endif
 +
 +	bnx2x_phy_probe(&bp->link_params);
 +
 +	return 0;
 +}
 +
 +
 +void bnx2x_free_mem(struct bnx2x *bp)
 +{
 +	/* fastpath */
 +	bnx2x_free_fp_mem(bp);
 +	/* end of fastpath */
 +
 +	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
 +		       sizeof(struct host_sp_status_block));
 +
 +	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 +		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +
 +	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 +		       sizeof(struct bnx2x_slowpath));
 +
 +	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
 +		       bp->context.size);
 +
 +	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
 +
 +	BNX2X_FREE(bp->ilt->lines);
 +
 +#ifdef BCM_CNIC
 +	if (!CHIP_IS_E1x(bp))
 +		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
 +			       sizeof(struct host_hc_status_block_e2));
 +	else
 +		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
 +			       sizeof(struct host_hc_status_block_e1x));
 +
 +	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
 +#endif
 +
 +	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 +
 +	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
 +		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 +}
 +
 +static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 +{
 +	int num_groups;
 +
 +	/* number of eth_queues */
 +	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +	/* Total number of FW statistics requests =
 +	 * 1 for port stats + 1 for PF stats + num_eth_queues */
 +	bp->fw_stats_num = 2 + num_queue_stats;
 +
 +
 +	/* Request is built from stats_query_header and an array of
 +	 * stats_query_cmd_group each of which contains
 +	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
 +	 * configured in the stats_query_header.
 +	 */
 +	num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
 +		(((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 +
 +	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 +			num_groups * sizeof(struct stats_query_cmd_group);
 +
 +	/* Data for statistics requests + stats_counter
 +	 *
 +	 * stats_counter holds per-STORM counters that are incremented
 +	 * when STORM has finished with the current request.
 +	 */
 +	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
 +		sizeof(struct per_pf_stats) +
 +		sizeof(struct per_queue_stats) * num_queue_stats +
 +		sizeof(struct stats_counter);
 +
 +	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
 +			bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +
 +	/* Set shortcuts */
 +	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
 +	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
 +
 +	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
 +		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
 +
 +	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
 +				   bp->fw_stats_req_sz;
 +	return 0;
 +
 +alloc_mem_err:
 +	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 +		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +	return -ENOMEM;
 +}
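
As a worked example of the sizing above (illustrative numbers only; the
value of STATS_QUERY_CMD_COUNT is assumed to be 16 here, it is not visible
in this hunk):

	/* num_queue_stats = 14 (hypothetical)
	 * fw_stats_num    = 2 + 14 = 16
	 * num_groups      = 16 / 16 + (16 % 16 ? 1 : 0) = 1
	 * fw_stats_req_sz = sizeof(struct stats_query_header) +
	 *                   1 * sizeof(struct stats_query_cmd_group)
	 */

With 15 queues instead, 2 + 15 = 17 requests would no longer fit in one
group, so num_groups would round up to 2.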
 +
 +
 +int bnx2x_alloc_mem(struct bnx2x *bp)
 +{
 +#ifdef BCM_CNIC
 +	if (!CHIP_IS_E1x(bp))
 +		/* size = the status block + ramrod buffers */
 +		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
 +				sizeof(struct host_hc_status_block_e2));
 +	else
 +		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
 +				sizeof(struct host_hc_status_block_e1x));
 +
 +	/* allocate searcher T2 table */
 +	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 +#endif
 +
 +
 +	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
 +			sizeof(struct host_sp_status_block));
 +
 +	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 +			sizeof(struct bnx2x_slowpath));
 +
 +	/* Allocate memory for FW statistics */
 +	if (bnx2x_alloc_fw_stats_mem(bp))
 +		goto alloc_mem_err;
 +
 +	bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
 +
 +	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
 +			bp->context.size);
 +
 +	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
 +
 +	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
 +		goto alloc_mem_err;
 +
 +	/* Slow path ring */
 +	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 +
 +	/* EQ */
 +	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
 +			BCM_PAGE_SIZE * NUM_EQ_PAGES);
 +
 +
 +	/* fastpath */
 +	/* needs to be done at the end, since it is self-adjusting to the
 +	 * amount of memory available for the RSS queues
 +	 */
 +	if (bnx2x_alloc_fp_mem(bp))
 +		goto alloc_mem_err;
 +	return 0;
 +
 +alloc_mem_err:
 +	bnx2x_free_mem(bp);
 +	return -ENOMEM;
 +}
 +
 +/*
 + * Init service functions
 + */
 +
 +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 +		      struct bnx2x_vlan_mac_obj *obj, bool set,
 +		      int mac_type, unsigned long *ramrod_flags)
 +{
 +	int rc;
 +	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
 +
 +	memset(&ramrod_param, 0, sizeof(ramrod_param));
 +
 +	/* Fill general parameters */
 +	ramrod_param.vlan_mac_obj = obj;
 +	ramrod_param.ramrod_flags = *ramrod_flags;
 +
 +	/* Fill a user request section if needed */
 +	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
 +		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
 +
 +		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
 +
 +		/* Set the command: ADD or DEL */
 +		if (set)
 +			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 +		else
 +			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 +	}
 +
 +	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
 +	if (rc < 0)
 +		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
 +	return rc;
 +}
 +
 +int bnx2x_del_all_macs(struct bnx2x *bp,
 +		       struct bnx2x_vlan_mac_obj *mac_obj,
 +		       int mac_type, bool wait_for_comp)
 +{
 +	int rc;
 +	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 +
 +	/* Wait for completion of the request */
 +	if (wait_for_comp)
 +		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +
 +	/* Set the mac type of addresses we want to clear */
 +	__set_bit(mac_type, &vlan_mac_flags);
 +
 +	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 +	if (rc < 0)
 +		BNX2X_ERR("Failed to delete MACs: %d\n", rc);
 +
 +	return rc;
 +}
 +
 +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 +{
 +	unsigned long ramrod_flags = 0;
 +
 +	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 +
 +	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +	/* Eth MAC is set on RSS leading client (fp[0]) */
 +	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
 +				 BNX2X_ETH_MAC, &ramrod_flags);
 +}
 +
 +int bnx2x_setup_leading(struct bnx2x *bp)
 +{
 +	return bnx2x_setup_queue(bp, &bp->fp[0], 1);
 +}
 +
 +/**
 + * bnx2x_set_int_mode - configure interrupt mode
 + *
 + * @bp:		driver handle
 + *
 + * In case of MSI-X it will also try to enable MSI-X.
 + */
 +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
 +{
 +	switch (int_mode) {
 +	case INT_MODE_MSI:
 +		bnx2x_enable_msi(bp);
 +		/* falling through... */
 +	case INT_MODE_INTx:
 +		bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 +		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
 +		break;
 +	default:
 +		/* Set number of queues according to bp->multi_mode value */
 +		bnx2x_set_num_queues(bp);
 +
 +		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
 +		   bp->num_queues);
 +
 +		/* if we can't use MSI-X we only need one fp,
 +		 * so try to enable MSI-X with the requested number of fp's
 +		 * and fall back to MSI or legacy INTx with one fp
 +		 */
 +		if (bnx2x_enable_msix(bp)) {
 +			/* failed to enable MSI-X */
 +			if (bp->multi_mode)
 +				DP(NETIF_MSG_IFUP,
 +					  "Multi requested but failed to "
 +					  "enable MSI-X (%d), "
 +					  "set number of queues to %d\n",
 +				   bp->num_queues,
 +				   1 + NON_ETH_CONTEXT_USE);
 +			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 +
 +			/* Try to enable MSI */
 +			if (!(bp->flags & DISABLE_MSI_FLAG))
 +				bnx2x_enable_msi(bp);
 +		}
 +		break;
 +	}
 +}
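
Condensed for reference, the fallback ladder the switch above implements
(illustration only, not driver code):

	/* int_mode == MSI:  enable MSI, single queue
	 * int_mode == INTx: single queue, legacy interrupt
	 * otherwise:        try MSI-X with bp->num_queues vectors;
	 *                   on failure fall back to a single queue and try
	 *                   MSI (unless DISABLE_MSI_FLAG), else legacy INTx
	 */

"Single queue" here stands for 1 + NON_ETH_CONTEXT_USE queues, as in the
code above.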
 +
 +/* must be called prior to any HW initializations */
 +static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
 +{
 +	return L2_ILT_LINES(bp);
 +}
 +
 +void bnx2x_ilt_set_info(struct bnx2x *bp)
 +{
 +	struct ilt_client_info *ilt_client;
 +	struct bnx2x_ilt *ilt = BP_ILT(bp);
 +	u16 line = 0;
 +
 +	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
 +	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
 +
 +	/* CDU */
 +	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
 +	ilt_client->client_num = ILT_CLIENT_CDU;
 +	ilt_client->page_size = CDU_ILT_PAGE_SZ;
 +	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
 +	ilt_client->start = line;
 +	line += bnx2x_cid_ilt_lines(bp);
 +#ifdef BCM_CNIC
 +	line += CNIC_ILT_LINES;
 +#endif
 +	ilt_client->end = line - 1;
 +
 +	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
 +					 "flags 0x%x, hw psz %d\n",
 +	   ilt_client->start,
 +	   ilt_client->end,
 +	   ilt_client->page_size,
 +	   ilt_client->flags,
 +	   ilog2(ilt_client->page_size >> 12));
 +
 +	/* QM */
 +	if (QM_INIT(bp->qm_cid_count)) {
 +		ilt_client = &ilt->clients[ILT_CLIENT_QM];
 +		ilt_client->client_num = ILT_CLIENT_QM;
 +		ilt_client->page_size = QM_ILT_PAGE_SZ;
 +		ilt_client->flags = 0;
 +		ilt_client->start = line;
 +
 +		/* 4 bytes for each cid */
 +		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
 +							 QM_ILT_PAGE_SZ);
 +
 +		ilt_client->end = line - 1;
 +
 +		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
 +						 "flags 0x%x, hw psz %d\n",
 +		   ilt_client->start,
 +		   ilt_client->end,
 +		   ilt_client->page_size,
 +		   ilt_client->flags,
 +		   ilog2(ilt_client->page_size >> 12));
 +
 +	}
 +	/* SRC */
 +	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
 +#ifdef BCM_CNIC
 +	ilt_client->client_num = ILT_CLIENT_SRC;
 +	ilt_client->page_size = SRC_ILT_PAGE_SZ;
 +	ilt_client->flags = 0;
 +	ilt_client->start = line;
 +	line += SRC_ILT_LINES;
 +	ilt_client->end = line - 1;
 +
 +	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
 +					 "flags 0x%x, hw psz %d\n",
 +	   ilt_client->start,
 +	   ilt_client->end,
 +	   ilt_client->page_size,
 +	   ilt_client->flags,
 +	   ilog2(ilt_client->page_size >> 12));
 +
 +#else
 +	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
 +#endif
 +
 +	/* TM */
 +	ilt_client = &ilt->clients[ILT_CLIENT_TM];
 +#ifdef BCM_CNIC
 +	ilt_client->client_num = ILT_CLIENT_TM;
 +	ilt_client->page_size = TM_ILT_PAGE_SZ;
 +	ilt_client->flags = 0;
 +	ilt_client->start = line;
 +	line += TM_ILT_LINES;
 +	ilt_client->end = line - 1;
 +
 +	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
 +					 "flags 0x%x, hw psz %d\n",
 +	   ilt_client->start,
 +	   ilt_client->end,
 +	   ilt_client->page_size,
 +	   ilt_client->flags,
 +	   ilog2(ilt_client->page_size >> 12));
 +
 +#else
 +	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
 +#endif
 +	BUG_ON(line > ILT_MAX_LINES);
 +}
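
To make the line accounting above concrete, a condensed map of the
per-function ILT layout it builds (summary only; the actual line counts
depend on constants not shown in this hunk):

	/* start_line = FUNC_ILT_BASE(func)
	 * CDU: bnx2x_cid_ilt_lines(bp) lines (+ CNIC_ILT_LINES with BCM_CNIC)
	 * QM : DIV_ROUND_UP(qm_cid_count * QM_QUEUES_PER_FUNC * 4,
	 *                   QM_ILT_PAGE_SZ) lines, if QM_INIT()
	 * SRC: SRC_ILT_LINES lines (BCM_CNIC builds only)
	 * TM : TM_ILT_LINES lines (BCM_CNIC builds only)
	 * and the total must stay within ILT_MAX_LINES (see the BUG_ON).
	 */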
 +
 +/**
 + * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 + *
 + * @bp:			driver handle
 + * @fp:			pointer to fastpath
 + * @init_params:	pointer to parameters structure
 + *
 + * parameters configured:
 + *      - HC configuration
 + *      - Queue's CDU context
 + */
 +static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 +	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 +{
 +	u8 cos;
 +
 +	/* FCoE Queue uses Default SB, thus has no HC capabilities */
 +	if (!IS_FCOE_FP(fp)) {
 +		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
 +		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
 +
 +		/* If HC is supported, enable host coalescing in the transition
 +		 * to INIT state.
 +		 */
 +		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
 +		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
 +
 +		/* HC rate */
 +		init_params->rx.hc_rate = bp->rx_ticks ?
 +			(1000000 / bp->rx_ticks) : 0;
 +		init_params->tx.hc_rate = bp->tx_ticks ?
 +			(1000000 / bp->tx_ticks) : 0;
 +
 +		/* FW SB ID */
 +		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
 +			fp->fw_sb_id;
 +
 +		/*
 +		 * CQ index among the SB indices: the FCoE client uses the default
 +		 * SB, therefore it's different.
 +		 */
 +		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
 +		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
 +	}
 +
 +	/* set maximum number of COSs supported by this queue */
 +	init_params->max_cos = fp->max_cos;
 +
 +	DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
 +	    fp->index, init_params->max_cos);
 +
 +	/* set the context pointers in the queue object */
 +	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
 +		init_params->cxts[cos] =
 +			&bp->context.vcxt[fp->txdata[cos].cid].eth;
 +}
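
The hc_rate fields above convert the configured coalescing interval (in
microseconds) into an events-per-second rate; a quick arithmetic example
with hypothetical tick values:

	/* bp->rx_ticks == 50  ->  rx.hc_rate = 1000000 / 50  = 20000/sec
	 * bp->tx_ticks == 100 ->  tx.hc_rate = 1000000 / 100 = 10000/sec
	 * a tick value of 0 disables the rate (hc_rate = 0)
	 */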
 +
 +int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +			struct bnx2x_queue_state_params *q_params,
 +			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
 +			int tx_index, bool leading)
 +{
 +	memset(tx_only_params, 0, sizeof(*tx_only_params));
 +
 +	/* Set the command */
 +	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 +
 +	/* Set tx-only QUEUE flags: don't zero statistics */
 +	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
 +
 +	/* choose the index of the cid to send the slow path on */
 +	tx_only_params->cid_index = tx_index;
 +
 +	/* Set general TX_ONLY_SETUP parameters */
 +	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
 +
 +	/* Set Tx TX_ONLY_SETUP parameters */
 +	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
 +
 +	DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
 +			 "cos %d, primary cid %d, cid %d, "
 +			 "client id %d, sp-client id %d, flags %lx\n",
 +	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
 +	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
 +	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);
 +
 +	/* send the ramrod */
 +	return bnx2x_queue_state_change(bp, q_params);
 +}
 +
 +
 +/**
 + * bnx2x_setup_queue - setup queue
 + *
 + * @bp:		driver handle
 + * @fp:		pointer to fastpath
 + * @leading:	is leading
 + *
 + * This function performs 2 steps in a Queue state machine:
 + *      1) RESET->INIT 2) INIT->SETUP
 + */
 +
 +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +		       bool leading)
 +{
 +	struct bnx2x_queue_state_params q_params = {0};
 +	struct bnx2x_queue_setup_params *setup_params =
 +						&q_params.params.setup;
 +	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
 +						&q_params.params.tx_only;
 +	int rc;
 +	u8 tx_index;
 +
 +	DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
 +
 +	/* reset IGU state; skip for the FCoE L2 queue */
 +	if (!IS_FCOE_FP(fp))
 +		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 +			     IGU_INT_ENABLE, 0);
 +
 +	q_params.q_obj = &fp->q_obj;
 +	/* We want to wait for completion in this context */
 +	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 +
 +	/* Prepare the INIT parameters */
 +	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
 +
 +	/* Set the command */
 +	q_params.cmd = BNX2X_Q_CMD_INIT;
 +
 +	/* Change the state to INIT */
 +	rc = bnx2x_queue_state_change(bp, &q_params);
 +	if (rc) {
 +		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
 +		return rc;
 +	}
 +
 +	DP(BNX2X_MSG_SP, "init complete\n");
 +
 +
 +	/* Now move the Queue to the SETUP state... */
 +	memset(setup_params, 0, sizeof(*setup_params));
 +
 +	/* Set QUEUE flags */
 +	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
 +
 +	/* Set general SETUP parameters */
 +	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
 +				FIRST_TX_COS_INDEX);
 +
 +	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
 +			    &setup_params->rxq_params);
 +
 +	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
 +			   FIRST_TX_COS_INDEX);
 +
 +	/* Set the command */
 +	q_params.cmd = BNX2X_Q_CMD_SETUP;
 +
 +	/* Change the state to SETUP */
 +	rc = bnx2x_queue_state_change(bp, &q_params);
 +	if (rc) {
 +		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
 +		return rc;
 +	}
 +
 +	/* loop through the relevant tx-only indices */
 +	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
 +	      tx_index < fp->max_cos;
 +	      tx_index++) {
 +
 +		/* prepare and send tx-only ramrod */
 +		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
 +					  tx_only_params, tx_index, leading);
 +		if (rc) {
 +			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
 +				  fp->index, tx_index);
 +			return rc;
 +		}
 +	}
 +
 +	return rc;
 +}
 +
 +static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 +{
 +	struct bnx2x_fastpath *fp = &bp->fp[index];
 +	struct bnx2x_fp_txdata *txdata;
 +	struct bnx2x_queue_state_params q_params = {0};
 +	int rc, tx_index;
 +
 +	DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
 +
 +	q_params.q_obj = &fp->q_obj;
 +	/* We want to wait for completion in this context */
 +	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 +
 +
 +	/* close tx-only connections */
 +	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
 +	     tx_index < fp->max_cos;
 +	     tx_index++) {
 +
 +		/* ascertain this is a normal queue */
 +		txdata = &fp->txdata[tx_index];
 +
 +		DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
 +							txdata->txq_index);
 +
 +		/* send terminate ramrod on the tx-only connection */
 +		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
 +		memset(&q_params.params.terminate, 0,
 +		       sizeof(q_params.params.terminate));
 +		q_params.params.terminate.cid_index = tx_index;
 +
 +		rc = bnx2x_queue_state_change(bp, &q_params);
 +		if (rc)
 +			return rc;
 +
 +		/* send cfc del ramrod on the tx-only connection */
 +		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
 +		memset(&q_params.params.cfc_del, 0,
 +		       sizeof(q_params.params.cfc_del));
 +		q_params.params.cfc_del.cid_index = tx_index;
 +		rc = bnx2x_queue_state_change(bp, &q_params);
 +		if (rc)
 +			return rc;
 +	}
 +	/* Stop the primary connection: */
 +	/* ...halt the connection */
 +	q_params.cmd = BNX2X_Q_CMD_HALT;
 +	rc = bnx2x_queue_state_change(bp, &q_params);
 +	if (rc)
 +		return rc;
 +
 +	/* ...terminate the connection */
 +	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
 +	memset(&q_params.params.terminate, 0,
 +	       sizeof(q_params.params.terminate));
 +	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
 +	rc = bnx2x_queue_state_change(bp, &q_params);
 +	if (rc)
 +		return rc;
 +	/* ...delete cfc entry */
 +	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
 +	memset(&q_params.params.cfc_del, 0,
 +	       sizeof(q_params.params.cfc_del));
 +	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
 +	return bnx2x_queue_state_change(bp, &q_params);
 +}
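
Summarized, the teardown above issues the following ramrod sequence for a
queue (reference only, mirroring the code):

	/* tx-only cids (FIRST_TX_ONLY_COS_INDEX .. max_cos - 1):
	 *     TERMINATE -> CFC_DEL
	 * primary cid:
	 *     HALT -> TERMINATE -> CFC_DEL
	 */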
 +
 +
 +static void bnx2x_reset_func(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	int func = BP_FUNC(bp);
 +	int i;
 +
 +	/* Disable the function in the FW */
 +	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
 +	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
 +	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
 +
 +	/* FP SBs */
 +	for_each_eth_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +		REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +			   CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
 +			   SB_DISABLED);
 +	}
 +
 +#ifdef BCM_CNIC
 +	/* CNIC SB */
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
 +		SB_DISABLED);
 +#endif
 +	/* SP SB */
 +	REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +		   CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
 +		   SB_DISABLED);
 +
 +	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
 +		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
 +		       i * 4, 0);
 +
 +	/* Configure IGU */
 +	if (bp->common.int_block == INT_BLOCK_HC) {
 +		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +	} else {
 +		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
 +		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 +	}
 +
 +#ifdef BCM_CNIC
 +	/* Disable Timer scan */
 +	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 +	/*
 +	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
 +	 * complete
 +	 */
 +	for (i = 0; i < 200; i++) {
 +		msleep(10);
 +		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
 +			break;
 +	}
 +#endif
 +	/* Clear ILT */
 +	bnx2x_clear_func_ilt(bp, func);
 +
 +	/* Timers bug workaround for E2: if this is vnic-3,
 +	 * we need to set the entire ILT range for its timers.
 +	 */
 +	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
 +		struct ilt_client_info ilt_cli;
 +		/* use dummy TM client */
 +		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 +		ilt_cli.start = 0;
 +		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 +		ilt_cli.client_num = ILT_CLIENT_TM;
 +
 +		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
 +	}
 +
 +	/* this assumes that reset_port() was called before reset_func() */
 +	if (!CHIP_IS_E1x(bp))
 +		bnx2x_pf_disable(bp);
 +
 +	bp->dmae_ready = 0;
 +}
 +
 +static void bnx2x_reset_port(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 val;
 +
 +	/* Reset physical Link */
 +	bnx2x__link_reset(bp);
 +
 +	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 +
 +	/* Do not rcv packets to BRB */
 +	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
 +	/* Do not direct rcv packets that are not for MCP to the BRB */
 +	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
 +			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 +
 +	/* Configure AEU */
 +	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
 +
 +	msleep(100);
 +	/* Check for BRB port occupancy */
 +	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
 +	if (val)
 +		DP(NETIF_MSG_IFDOWN,
 +		   "BRB1 is not empty  %d blocks are occupied\n", val);
 +
 +	/* TODO: Close Doorbell port? */
 +}
 +
 +static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 +{
 +	struct bnx2x_func_state_params func_params = {0};
 +
 +	/* Prepare parameters for function state transitions */
 +	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +
 +	func_params.f_obj = &bp->func_obj;
 +	func_params.cmd = BNX2X_F_CMD_HW_RESET;
 +
 +	func_params.params.hw_init.load_phase = load_code;
 +
 +	return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static inline int bnx2x_func_stop(struct bnx2x *bp)
 +{
 +	struct bnx2x_func_state_params func_params = {0};
 +	int rc;
 +
 +	/* Prepare parameters for function state transitions */
 +	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +	func_params.f_obj = &bp->func_obj;
 +	func_params.cmd = BNX2X_F_CMD_STOP;
 +
 +	/*
 +	 * Try to stop the function the 'good way'. If it fails (in case
 +	 * of a parity error during bnx2x_chip_cleanup()) and we are
 +	 * not in a debug mode, perform a state transaction in order to
 +	 * enable further HW_RESET transactions.
 +	 */
 +	rc = bnx2x_func_state_change(bp, &func_params);
 +	if (rc) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +		return rc;
 +#else
 +		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
 +			  "transaction\n");
 +		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
 +		return bnx2x_func_state_change(bp, &func_params);
 +#endif
 +	}
 +
 +	return 0;
 +}
 +
 +/**
 + * bnx2x_send_unload_req - request unload mode from the MCP.
 + *
 + * @bp:			driver handle
 + * @unload_mode:	requested function's unload mode
 + *
 + * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 + */
 +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 +{
 +	u32 reset_code = 0;
 +	int port = BP_PORT(bp);
 +
 +	/* Select the UNLOAD request mode */
 +	if (unload_mode == UNLOAD_NORMAL)
 +		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +	else if (bp->flags & NO_WOL_FLAG)
 +		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
 +
 +	else if (bp->wol) {
 +		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +		u8 *mac_addr = bp->dev->dev_addr;
 +		u32 val;
++		u16 pmc;
++
 +		/* The mac address is written to entries 1-4 to
- 		   preserve entry 0 which is used by the PMF */
++		 * preserve entry 0 which is used by the PMF
++		 */
 +		u8 entry = (BP_VN(bp) + 1)*8;
 +
 +		val = (mac_addr[0] << 8) | mac_addr[1];
 +		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
 +
 +		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 +		      (mac_addr[4] << 8) | mac_addr[5];
 +		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 +
++		/* Enable the PME and clear the status */
++		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
++		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
++		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
++
 +		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 +
 +	} else
 +		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +	/* Send the request to the MCP */
 +	if (!BP_NOMCP(bp))
 +		reset_code = bnx2x_fw_command(bp, reset_code, 0);
 +	else {
 +		int path = BP_PATH(bp);
 +
 +		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
 +				     "%d, %d, %d\n",
 +		   path, load_count[path][0], load_count[path][1],
 +		   load_count[path][2]);
 +		load_count[path][0]--;
 +		load_count[path][1 + port]--;
 +		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
 +				     "%d, %d, %d\n",
 +		   path, load_count[path][0], load_count[path][1],
 +		   load_count[path][2]);
 +		if (load_count[path][0] == 0)
 +			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
 +		else if (load_count[path][1 + port] == 0)
 +			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 +		else
 +			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
 +	}
 +
 +	return reset_code;
 +}
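
In the BP_NOMCP() branch above, the per-path load counters substitute for
the MCP's bookkeeping; a worked example with hypothetical counts, where
load_count[path] holds {total, port0, port1}:

	/* path 0 loaded once on each port: load_count[0] = {2, 1, 1}
	 * unload on port 1: {2, 1, 1} -> {1, 1, 0}; total != 0, port1 == 0
	 *                   -> FW_MSG_CODE_DRV_UNLOAD_PORT
	 * unload on port 0: {1, 1, 0} -> {0, 0, 0}; total == 0
	 *                   -> FW_MSG_CODE_DRV_UNLOAD_COMMON
	 */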
 +
 +/**
 + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 + *
 + * @bp:		driver handle
 + */
 +void bnx2x_send_unload_done(struct bnx2x *bp)
 +{
 +	/* Report UNLOAD_DONE to MCP */
 +	if (!BP_NOMCP(bp))
 +		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +}
 +
 +static inline int bnx2x_func_wait_started(struct bnx2x *bp)
 +{
 +	int tout = 50;
 +	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +
 +	if (!bp->port.pmf)
 +		return 0;
 +
 +	/*
 +	 * (assumption: No Attention from MCP at this stage)
 +	 * The PMF is probably in the middle of a TXdisable/enable
 +	 * transaction:
 +	 * 1. Sync IRQs for the default SB
 +	 * 2. Sync the SP queue - this guarantees us that attention handling
 +	 *    started
 +	 * 3. Wait until the TXdisable/enable transaction completes
 +	 *
 +	 * 1+2 guarantee that if a DCBx attention was scheduled it has
 +	 * already changed the pending bit of the transaction from STARTED
 +	 * to TX_STOPPED; if we have already received completion for the
 +	 * transaction the state is TX_STOPPED.
 +	 * The state will return to STARTED after completion of the
 +	 * TX_STOPPED-->STARTED transaction.
 +	 */
 +
 +	/* make sure default SB ISR is done */
 +	if (msix)
 +		synchronize_irq(bp->msix_table[0].vector);
 +	else
 +		synchronize_irq(bp->pdev->irq);
 +
 +	flush_workqueue(bnx2x_wq);
 +
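 +	/* Poll for up to ~1s (50 x 20ms) for the function to return to
 +	 * the STARTED state.
 +	 */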
 +	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
 +				BNX2X_F_STATE_STARTED && tout--)
 +		msleep(20);
 +
 +	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
 +						BNX2X_F_STATE_STARTED) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +		return -EBUSY;
 +#else
 +		/*
 +		 * Failed to complete the transaction in a "good way".
 +		 * Force both transactions with the CLR bit.
 +		 */
 +		struct bnx2x_func_state_params func_params = {0};
 +
 +		DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
 +			  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
 +
 +		func_params.f_obj = &bp->func_obj;
 +		__set_bit(RAMROD_DRV_CLR_ONLY,
 +					&func_params.ramrod_flags);
 +
 +		/* STARTED-->TX_STOPPED */
 +		func_params.cmd = BNX2X_F_CMD_TX_STOP;
 +		bnx2x_func_state_change(bp, &func_params);
 +
 +		/* TX_STOPPED-->STARTED */
 +		func_params.cmd = BNX2X_F_CMD_TX_START;
 +		return bnx2x_func_state_change(bp, &func_params);
 +#endif
 +	}
 +
 +	return 0;
 +}
 +
 +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 +{
 +	int port = BP_PORT(bp);
 +	int i, rc = 0;
 +	u8 cos;
 +	struct bnx2x_mcast_ramrod_params rparam = {0};
 +	u32 reset_code;
 +
 +	/* Wait until tx fastpath tasks complete */
 +	for_each_tx_queue(bp, i) {
 +		struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +		for_each_cos_in_tx_queue(fp, cos)
 +			rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
 +#ifdef BNX2X_STOP_ON_ERROR
 +		if (rc)
 +			return;
 +#endif
 +	}
 +
 +	/* Give HW time to discard old tx messages */
 +	usleep_range(1000, 1000);
 +
 +	/* Clean all ETH MACs */
 +	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
 +	if (rc < 0)
 +		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
 +
 +	/* Clean up UC list  */
 +	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
 +				true);
 +	if (rc < 0)
 +		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
 +			  "%d\n", rc);
 +
 +	/* Disable LLH */
 +	if (!CHIP_IS_E1(bp))
 +		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 +
 +	/* Set "drop all" (stop Rx).
 +	 * We need to take a netif_addr_lock() here in order to prevent
 +	 * a race between the completion code and this code.
 +	 */
 +	netif_addr_lock_bh(bp->dev);
 +	/* Schedule the rx_mode command */
 +	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 +		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 +	else
 +		bnx2x_set_storm_rx_mode(bp);
 +
 +	/* Cleanup multicast configuration */
 +	rparam.mcast_obj = &bp->mcast_obj;
 +	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +	if (rc < 0)
 +		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
 +
 +	netif_addr_unlock_bh(bp->dev);
 +
 +	/*
 +	 * Send the UNLOAD_REQUEST to the MCP. This will return if
 +	 * this function should perform FUNC, PORT or COMMON HW
 +	 * reset.
 +	 */
 +	reset_code = bnx2x_send_unload_req(bp, unload_mode);
 +
 +	/*
 +	 * (assumption: No Attention from MCP at this stage)
 +	 * PMF probably in the middle of TXdisable/enable transaction
 +	 */
 +	rc = bnx2x_func_wait_started(bp);
 +	if (rc) {
 +		BNX2X_ERR("bnx2x_func_wait_started failed\n");
 +#ifdef BNX2X_STOP_ON_ERROR
 +		return;
 +#endif
 +	}
 +
 +	/* Close multi and leading connections
 +	 * Completions for ramrods are collected in a synchronous way
 +	 */
 +	for_each_queue(bp, i)
 +		if (bnx2x_stop_queue(bp, i))
 +#ifdef BNX2X_STOP_ON_ERROR
 +			return;
 +#else
 +			goto unload_error;
 +#endif
 +	/* If the SP settings didn't get completed by now - something
 +	 * has gone very wrong.
 +	 */
 +	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
 +		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
 +
 +#ifndef BNX2X_STOP_ON_ERROR
 +unload_error:
 +#endif
 +	rc = bnx2x_func_stop(bp);
 +	if (rc) {
 +		BNX2X_ERR("Function stop failed!\n");
 +#ifdef BNX2X_STOP_ON_ERROR
 +		return;
 +#endif
 +	}
 +
 +	/* Disable HW interrupts, NAPI */
 +	bnx2x_netif_stop(bp, 1);
 +
 +	/* Release IRQs */
 +	bnx2x_free_irq(bp);
 +
 +	/* Reset the chip */
 +	rc = bnx2x_reset_hw(bp, reset_code);
 +	if (rc)
 +		BNX2X_ERR("HW_RESET failed\n");
 +
 +	/* Report UNLOAD_DONE to MCP */
 +	bnx2x_send_unload_done(bp);
 +}
 +
 +void bnx2x_disable_close_the_gate(struct bnx2x *bp)
 +{
 +	u32 val;
 +
 +	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
 +
 +	if (CHIP_IS_E1(bp)) {
 +		int port = BP_PORT(bp);
 +		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +			MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +
 +		val = REG_RD(bp, addr);
 +		val &= ~(0x300);
 +		REG_WR(bp, addr, val);
 +	} else {
 +		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
 +		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
 +			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
 +		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
 +	}
 +}
 +
 +/* Close gates #2, #3 and #4: */
 +static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
 +{
 +	u32 val;
 +
 +	/* Gates #2 and #4a are closed/opened for "not E1" only */
 +	if (!CHIP_IS_E1(bp)) {
 +		/* #4 */
 +		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
 +		/* #2 */
 +		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
 +	}
 +
 +	/* #3 */
 +	if (CHIP_IS_E1x(bp)) {
 +		/* Prevent interrupts from HC on both ports */
 +		val = REG_RD(bp, HC_REG_CONFIG_1);
 +		REG_WR(bp, HC_REG_CONFIG_1,
 +		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
 +		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
 +
 +		val = REG_RD(bp, HC_REG_CONFIG_0);
 +		REG_WR(bp, HC_REG_CONFIG_0,
 +		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
 +		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
 +	} else {
 +		/* Prevent incoming interrupts in the IGU */
 +		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 +
 +		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
 +		       (!close) ?
 +		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
 +		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
 +	}
 +
 +	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
 +		close ? "closing" : "opening");
 +	mmiowb();
 +}
 +
 +#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
 +
 +static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
 +{
 +	/* Save the current value of the `magic' bit and then set it */
 +	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
 +	*magic_val = val & SHARED_MF_CLP_MAGIC;
 +	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
 +}
 +
 +/**
 + * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 + *
 + * @bp:		driver handle
 + * @magic_val:	old value of the `magic' bit.
 + */
 +static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
 +{
 +	/* Restore the `magic' bit value... */
 +	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
 +	MF_CFG_WR(bp, shared_mf_config.clp_mb,
 +		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
 +}
 +
 +/**
 + * bnx2x_reset_mcp_prep - prepare for MCP reset.
 + *
 + * @bp:		driver handle
 + * @magic_val:	old value of 'magic' bit.
 + *
 + * Takes care of CLP configurations.
 + */
 +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
 +{
 +	u32 shmem;
 +	u32 validity_offset;
 +
 +	DP(NETIF_MSG_HW, "Starting\n");
 +
 +	/* Set `magic' bit in order to save MF config */
 +	if (!CHIP_IS_E1(bp))
 +		bnx2x_clp_reset_prep(bp, magic_val);
 +
 +	/* Get shmem offset */
 +	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +	validity_offset = offsetof(struct shmem_region, validity_map[0]);
 +
 +	/* Clear validity map flags */
 +	if (shmem > 0)
 +		REG_WR(bp, shmem + validity_offset, 0);
 +}
 +
 +#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
 +#define MCP_ONE_TIMEOUT  100    /* 100 ms */
 +
 +/**
 + * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 + *
 + * @bp:	driver handle
 + */
 +static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
 +{
 +	/* special handling for emulation and FPGA:
 +	 * wait 10 times longer
 +	 */
 +	if (CHIP_REV_IS_SLOW(bp))
 +		msleep(MCP_ONE_TIMEOUT*10);
 +	else
 +		msleep(MCP_ONE_TIMEOUT);
 +}
 +
 +/*
 + * initializes bp->common.shmem_base and waits for validity signature to appear
 + */
 +static int bnx2x_init_shmem(struct bnx2x *bp)
 +{
 +	int cnt = 0;
 +	u32 val = 0;
 +
 +	do {
 +		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +		if (bp->common.shmem_base) {
 +			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 +			if (val & SHR_MEM_VALIDITY_MB)
 +				return 0;
 +		}
 +
 +		bnx2x_mcp_wait_one(bp);
 +
 +	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
 +
 +	BNX2X_ERR("BAD MCP validity signature\n");
 +
 +	return -ENODEV;
 +}
 +
 +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
 +{
 +	int rc = bnx2x_init_shmem(bp);
 +
 +	/* Restore the `magic' bit value */
 +	if (!CHIP_IS_E1(bp))
 +		bnx2x_clp_reset_done(bp, magic_val);
 +
 +	return rc;
 +}
 +
 +static void bnx2x_pxp_prep(struct bnx2x *bp)
 +{
 +	if (!CHIP_IS_E1(bp)) {
 +		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
 +		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
 +		mmiowb();
 +	}
 +}
 +
 +/*
 + * Reset the whole chip except for:
 + *      - PCIE core
 + *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 + *              one reset bit)
 + *      - IGU
 + *      - MISC (including AEU)
 + *      - GRC
 + *      - RBCN, RBCP
 + */
 +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
 +{
 +	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
 +	u32 global_bits2, stay_reset2;
 +
 +	/*
 +	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
 +	 * (per chip) blocks.
 +	 */
 +	global_bits2 =
 +		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
 +		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
 +
 +	/* Don't reset the following blocks */
 +	not_reset_mask1 =
 +		MISC_REGISTERS_RESET_REG_1_RST_HC |
 +		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
 +		MISC_REGISTERS_RESET_REG_1_RST_PXP;
 +
 +	not_reset_mask2 =
 +		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
 +		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
 +		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
 +		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
 +		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
 +		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
 +		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
 +		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
 +		MISC_REGISTERS_RESET_REG_2_RST_ATC |
 +		MISC_REGISTERS_RESET_REG_2_PGLC;
 +
 +	/*
 +	 * Keep the following blocks in reset:
 +	 *  - all xxMACs are handled by the bnx2x_link code.
 +	 */
 +	stay_reset2 =
 +		MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
 +		MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
 +		MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
 +		MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
 +		MISC_REGISTERS_RESET_REG_2_UMAC0 |
 +		MISC_REGISTERS_RESET_REG_2_UMAC1 |
 +		MISC_REGISTERS_RESET_REG_2_XMAC |
 +		MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
 +
 +	/* Full reset masks according to the chip */
 +	reset_mask1 = 0xffffffff;
 +
 +	if (CHIP_IS_E1(bp))
 +		reset_mask2 = 0xffff;
 +	else if (CHIP_IS_E1H(bp))
 +		reset_mask2 = 0x1ffff;
 +	else if (CHIP_IS_E2(bp))
 +		reset_mask2 = 0xfffff;
 +	else /* CHIP_IS_E3 */
 +		reset_mask2 = 0x3ffffff;
 +
 +	/* Don't reset global blocks unless we need to */
 +	if (!global)
 +		reset_mask2 &= ~global_bits2;
 +
 +	/*
 +	 * In case of attention in the QM, we need to reset PXP
 +	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
 +	 * because otherwise QM reset would release 'close the gates' shortly
 +	 * before resetting the PXP, then the PSWRQ would send a write
 +	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
 +	 * read the payload data from PSWWR, but PSWWR would not
 +	 * respond. The write queue in PGLUE would get stuck, DMAE commands
 +	 * would not return. Therefore it's important to reset the second
 +	 * reset register (containing the
 +	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
 +	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
 +	 * bit).
 +	 */
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +	       reset_mask2 & (~not_reset_mask2));
 +
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +	       reset_mask1 & (~not_reset_mask1));
 +
 +	barrier();
 +	mmiowb();
 +
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +	       reset_mask2 & (~stay_reset2));
 +
 +	barrier();
 +	mmiowb();
 +
 +	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
 +	mmiowb();
 +}
 +
 +/**
 + * bnx2x_er_poll_igu_vq - poll for pending writes bit.
 + * It should get cleared in no more than 1s.
 + *
 + * @bp:	driver handle
 + *
 + * It should get cleared in no more than 1s. Returns 0 if
 + * pending writes bit gets cleared.
 + */
 +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
 +{
 +	int cnt = 1000;
 +	u32 pend_bits = 0;
 +
 +	do {
 +		pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
 +
 +		if (pend_bits == 0)
 +			break;
 +
 +		usleep_range(1000, 1000);
 +	} while (cnt-- > 0);
 +
 +	if (cnt <= 0) {
 +		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
 +			  pend_bits);
 +		return -EBUSY;
 +	}
 +
 +	return 0;
 +}
 +
 +static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 +{
 +	int cnt = 1000;
 +	u32 val = 0;
 +	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
 +
 +	/* Empty the Tetris buffer, wait for 1s */
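 +	/* The idle test below checks that the PXP2 read-path credit
 +	 * counters are back at what appear to be their reset-default
 +	 * values (0x7e SR credits, 0xa0 block credits), that both ports
 +	 * report idle and that no expansion-ROM request is outstanding.
 +	 */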
 +	do {
 +		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
 +		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
 +		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
 +		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
 +		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
 +		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
 +		    ((port_is_idle_0 & 0x1) == 0x1) &&
 +		    ((port_is_idle_1 & 0x1) == 0x1) &&
 +		    (pgl_exp_rom2 == 0xffffffff))
 +			break;
 +		usleep_range(1000, 1000);
 +	} while (cnt-- > 0);
 +
 +	if (cnt <= 0) {
 +		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are"
 +			  " still outstanding read requests after 1s!\n");
 +		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
 +			  " port_is_idle_0=0x%08x,"
 +			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
 +			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
 +			  pgl_exp_rom2);
 +		return -EAGAIN;
 +	}
 +
 +	barrier();
 +
 +	/* Close gates #2, #3 and #4 */
 +	bnx2x_set_234_gates(bp, true);
 +
 +	/* Poll for IGU VQs for 57712 and newer chips */
 +	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
 +		return -EAGAIN;
 +
 +	/* TBD: Indicate that "process kill" is in progress to MCP */
 +
 +	/* Clear "unprepared" bit */
 +	REG_WR(bp, MISC_REG_UNPREPARED, 0);
 +	barrier();
 +
 +	/* Make sure all is written to the chip before the reset */
 +	mmiowb();
 +
 +	/* Wait for 1ms to empty GLUE and PCI-E core queues,
 +	 * PSWHST, GRC and PSWRD Tetris buffer.
 +	 */
 +	usleep_range(1000, 1000);
 +
 +	/* Prepare for chip reset: */
 +	/* MCP */
 +	if (global)
 +		bnx2x_reset_mcp_prep(bp, &val);
 +
 +	/* PXP */
 +	bnx2x_pxp_prep(bp);
 +	barrier();
 +
 +	/* reset the chip */
 +	bnx2x_process_kill_chip_reset(bp, global);
 +	barrier();
 +
 +	/* Recover after reset: */
 +	/* MCP */
 +	if (global && bnx2x_reset_mcp_comp(bp, val))
 +		return -EAGAIN;
 +
 +	/* TBD: Add resetting the NO_MCP mode DB here */
 +
 +	/* PXP */
 +	bnx2x_pxp_prep(bp);
 +
 +	/* Open the gates #2, #3 and #4 */
 +	bnx2x_set_234_gates(bp, false);
 +
 +	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
 +	 * reset state, re-enable attentions. */
 +
 +	return 0;
 +}
 +
 +int bnx2x_leader_reset(struct bnx2x *bp)
 +{
 +	int rc = 0;
 +	bool global = bnx2x_reset_is_global(bp);
 +
 +	/* Try to recover after the failure */
 +	if (bnx2x_process_kill(bp, global)) {
 +		netdev_err(bp->dev, "Something bad happened on engine %d! "
 +				    "Aii!\n", BP_PATH(bp));
 +		rc = -EAGAIN;
 +		goto exit_leader_reset;
 +	}
 +
 +	/*
 +	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
 +	 * state.
 +	 */
 +	bnx2x_set_reset_done(bp);
 +	if (global)
 +		bnx2x_clear_reset_global(bp);
 +
 +exit_leader_reset:
 +	bp->is_leader = 0;
 +	bnx2x_release_leader_lock(bp);
 +	smp_mb();
 +	return rc;
 +}
 +
 +static inline void bnx2x_recovery_failed(struct bnx2x *bp)
 +{
 +	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
 +
 +	/* Disconnect this device */
 +	netif_device_detach(bp->dev);
 +
 +	/*
 +	 * Block ifup for all functions on this engine until "process kill"
 +	 * or power cycle.
 +	 */
 +	bnx2x_set_reset_in_progress(bp);
 +
 +	/* Shut down the power */
 +	bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +	bp->recovery_state = BNX2X_RECOVERY_FAILED;
 +
 +	smp_mb();
 +}
 +
 +/*
 + * Assumption: runs under rtnl lock. This together with the fact
 + * that it's called only from bnx2x_sp_rtnl() ensure that it
 + * will never be called when netif_running(bp->dev) is false.
 + */
 +static void bnx2x_parity_recover(struct bnx2x *bp)
 +{
 +	bool global = false;
 +
 +	DP(NETIF_MSG_HW, "Handling parity\n");
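 +	/* Run the recovery state machine: each state either falls through
 +	 * to the next iteration, reschedules this task and returns, or
 +	 * terminates the flow.
 +	 */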
 +	while (1) {
 +		switch (bp->recovery_state) {
 +		case BNX2X_RECOVERY_INIT:
 +			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
 +			bnx2x_chk_parity_attn(bp, &global, false);
 +
 +			/* Try to get a LEADER_LOCK HW lock */
 +			if (bnx2x_trylock_leader_lock(bp)) {
 +				bnx2x_set_reset_in_progress(bp);
 +				/*
 +				 * If there was a global attention, set the
 +				 * global reset bit.
 +				 */
 +
 +				if (global)
 +					bnx2x_set_reset_global(bp);
 +
 +				bp->is_leader = 1;
 +			}
 +
 +			/* Stop the driver */
 +			/* If interface has been removed - break */
 +			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
 +				return;
 +
 +			bp->recovery_state = BNX2X_RECOVERY_WAIT;
 +
 +			/*
 +			 * Reset MCP command sequence number and MCP mail box
 +			 * sequence as we are going to reset the MCP.
 +			 */
 +			if (global) {
 +				bp->fw_seq = 0;
 +				bp->fw_drv_pulse_wr_seq = 0;
 +			}
 +
 +			/* Ensure "is_leader", MCP command sequence and
 +			 * "recovery_state" update values are seen on other
 +			 * CPUs.
 +			 */
 +			smp_mb();
 +			break;
 +
 +		case BNX2X_RECOVERY_WAIT:
 +			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
 +			if (bp->is_leader) {
 +				int other_engine = BP_PATH(bp) ? 0 : 1;
 +				u32 other_load_counter =
 +					bnx2x_get_load_cnt(bp, other_engine);
 +				u32 load_counter =
 +					bnx2x_get_load_cnt(bp, BP_PATH(bp));
 +				global = bnx2x_reset_is_global(bp);
 +
 +				/*
 +				 * In case of a parity error in a global block,
 +				 * let the first leader that performs a
 +				 * leader_reset() reset the global blocks in
 +				 * order to clear global attentions. Otherwise
 +				 * the gates will remain closed for that
 +				 * engine.
 +				 */
 +				if (load_counter ||
 +				    (global && other_load_counter)) {
 +					/* Wait until all other functions get
 +					 * down.
 +					 */
 +					schedule_delayed_work(&bp->sp_rtnl_task,
 +								HZ/10);
 +					return;
 +				} else {
 +					/* If all other functions got down -
 +					 * try to bring the chip back to
 +					 * normal. In any case it's an exit
 +					 * point for a leader.
 +					 */
 +					if (bnx2x_leader_reset(bp)) {
 +						bnx2x_recovery_failed(bp);
 +						return;
 +					}
 +
 +					/* If we are here, it means that the
 +					 * leader has succeeded and no longer
 +					 * wants to be the leader. Try to
 +					 * continue as a non-leader.
 +					 */
 +					break;
 +				}
 +			} else { /* non-leader */
 +				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
 +					/* Try to get a LEADER_LOCK HW lock,
 +					 * since the former leader may have
 +					 * been unloaded by the user or may
 +					 * have released leadership for some
 +					 * other reason.
 +					 */
 +					if (bnx2x_trylock_leader_lock(bp)) {
 +						/* I'm a leader now! Restart a
 +						 * switch case.
 +						 */
 +						bp->is_leader = 1;
 +						break;
 +					}
 +
 +					schedule_delayed_work(&bp->sp_rtnl_task,
 +								HZ/10);
 +					return;
 +
 +				} else {
 +					/*
 +					 * If there was a global attention, wait
 +					 * for it to be cleared.
 +					 */
 +					if (bnx2x_reset_is_global(bp)) {
 +						schedule_delayed_work(
 +							&bp->sp_rtnl_task,
 +							HZ/10);
 +						return;
 +					}
 +
 +					if (bnx2x_nic_load(bp, LOAD_NORMAL))
 +						bnx2x_recovery_failed(bp);
 +					else {
 +						bp->recovery_state =
 +							BNX2X_RECOVERY_DONE;
 +						smp_mb();
 +					}
 +
 +					return;
 +				}
 +			}
 +		default:
 +			return;
 +		}
 +	}
 +}
 +
 +/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 + * scheduled on a general queue in order to prevent a deadlock.
 + */
 +static void bnx2x_sp_rtnl_task(struct work_struct *work)
 +{
 +	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
 +
 +	rtnl_lock();
 +
 +	if (!netif_running(bp->dev))
 +		goto sp_rtnl_exit;
 +
 +	/* if stop on error is defined no recovery flows should be executed */
 +#ifdef BNX2X_STOP_ON_ERROR
 +	BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, "
 +		  "so the reset was not done to allow a debug dump;\n"
 +		  "you will need to reboot when done\n");
 +	goto sp_rtnl_not_reset;
 +#endif
 +
 +	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
 +		/*
 +		 * Clear all pending SP commands as we are going to reset the
 +		 * function anyway.
 +		 */
 +		bp->sp_rtnl_state = 0;
 +		smp_mb();
 +
 +		bnx2x_parity_recover(bp);
 +
 +		goto sp_rtnl_exit;
 +	}
 +
 +	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
 +		/*
 +		 * Clear all pending SP commands as we are going to reset the
 +		 * function anyway.
 +		 */
 +		bp->sp_rtnl_state = 0;
 +		smp_mb();
 +
 +		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +		bnx2x_nic_load(bp, LOAD_NORMAL);
 +
 +		goto sp_rtnl_exit;
 +	}
 +#ifdef BNX2X_STOP_ON_ERROR
 +sp_rtnl_not_reset:
 +#endif
 +	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 +		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 +
 +sp_rtnl_exit:
 +	rtnl_unlock();
 +}
 +
 +/* end of nic load/unload */
 +
 +static void bnx2x_period_task(struct work_struct *work)
 +{
 +	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
 +
 +	if (!netif_running(bp->dev))
 +		goto period_task_exit;
 +
 +	if (CHIP_REV_IS_SLOW(bp)) {
 +		BNX2X_ERR("period task called on emulation, ignoring\n");
 +		goto period_task_exit;
 +	}
 +
 +	bnx2x_acquire_phy_lock(bp);
 +	/*
 +	 * The barrier is needed to ensure the ordering between the writing to
 +	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
 +	 * the reading here.
 +	 */
 +	smp_mb();
 +	if (bp->port.pmf) {
 +		bnx2x_period_func(&bp->link_params, &bp->link_vars);
 +
 +		/* Re-queue task in 1 sec */
 +		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
 +	}
 +
 +	bnx2x_release_phy_lock(bp);
 +period_task_exit:
 +	return;
 +}
 +
 +/*
 + * Init service functions
 + */
 +
 +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 +{
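 +	/* The per-function pretend registers are laid out at a fixed
 +	 * stride; derive the stride from the F0/F1 addresses and index
 +	 * by the absolute function number.
 +	 */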
 +	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
 +	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
 +	return base + (BP_ABS_FUNC(bp)) * stride;
 +}
 +
 +static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
 +{
 +	u32 reg = bnx2x_get_pretend_reg(bp);
 +
 +	/* Flush all outstanding writes */
 +	mmiowb();
 +
 +	/* Pretend to be function 0 */
 +	REG_WR(bp, reg, 0);
 +	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */
 +
 +	/* From now we are in the "like-E1" mode */
 +	bnx2x_int_disable(bp);
 +
 +	/* Flush all outstanding writes */
 +	mmiowb();
 +
 +	/* Restore the original function */
 +	REG_WR(bp, reg, BP_ABS_FUNC(bp));
 +	REG_RD(bp, reg);
 +}
 +
 +static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
 +{
 +	if (CHIP_IS_E1(bp))
 +		bnx2x_int_disable(bp);
 +	else
 +		bnx2x_undi_int_disable_e1h(bp);
 +}
 +
 +static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 +{
 +	u32 val;
 +
 +	/* Check if there is any driver already loaded */
 +	val = REG_RD(bp, MISC_REG_UNPREPARED);
 +	if (val == 0x1) {
 +
 +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +		/*
 +		 * Check if it is the UNDI driver:
 +		 * the UNDI driver initializes the CID offset for the normal
 +		 * doorbell to 0x7
 +		 */
 +		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 +		if (val == 0x7) {
 +			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +			/* save our pf_num */
 +			int orig_pf_num = bp->pf_num;
 +			int port;
 +			u32 swap_en, swap_val, value;
 +
 +			/* clear the UNDI indication */
 +			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
 +
 +			BNX2X_DEV_INFO("UNDI is active! Resetting the device\n");
 +
 +			/* try unload UNDI on port 0 */
 +			bp->pf_num = 0;
 +			bp->fw_seq =
 +			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +				DRV_MSG_SEQ_NUMBER_MASK);
 +			reset_code = bnx2x_fw_command(bp, reset_code, 0);
 +
 +			/* if UNDI is loaded on the other port */
 +			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 +
 +				/* send "DONE" for previous unload */
 +				bnx2x_fw_command(bp,
 +						 DRV_MSG_CODE_UNLOAD_DONE, 0);
 +
 +				/* unload UNDI on port 1 */
 +				bp->pf_num = 1;
 +				bp->fw_seq =
 +			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +					DRV_MSG_SEQ_NUMBER_MASK);
 +				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +				bnx2x_fw_command(bp, reset_code, 0);
 +			}
 +
 +			bnx2x_undi_int_disable(bp);
 +			port = BP_PORT(bp);
 +
 +			/* close input traffic and wait for it */
 +			/* Do not receive packets into the BRB */
 +			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
 +					   NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
 +			/* Do not direct received packets that are not meant
 +			 * for the MCP to the BRB */
 +			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
 +					   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 +			/* clear AEU */
 +			REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +					   MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
 +			msleep(10);
 +
 +			/* save NIG port swap info */
 +			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 +			/* reset device */
 +			REG_WR(bp,
 +			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +			       0xd3ffffff);
 +
 +			value = 0x1400;
 +			if (CHIP_IS_E3(bp)) {
 +				value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +				value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +			}
 +
 +			REG_WR(bp,
 +			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +			       value);
 +
 +			/* take the NIG out of reset and restore swap values */
 +			REG_WR(bp,
 +			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
 +			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
 +			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
 +			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
 +
 +			/* send unload done to the MCP */
 +			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +
 +			/* restore our func and fw_seq */
 +			bp->pf_num = orig_pf_num;
 +			bp->fw_seq =
 +			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +				DRV_MSG_SEQ_NUMBER_MASK);
 +		}
 +
 +		/* now it's safe to release the lock */
 +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +	}
 +}
 +
 +static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 +{
 +	u32 val, val2, val3, val4, id;
 +	u16 pmc;
 +
 +	/* Get the chip revision id and number. */
 +	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
 +	val = REG_RD(bp, MISC_REG_CHIP_NUM);
 +	id = ((val & 0xffff) << 16);
 +	val = REG_RD(bp, MISC_REG_CHIP_REV);
 +	id |= ((val & 0xf) << 12);
 +	val = REG_RD(bp, MISC_REG_CHIP_METAL);
 +	id |= ((val & 0xff) << 4);
 +	val = REG_RD(bp, MISC_REG_BOND_ID);
 +	id |= (val & 0xf);
 +	bp->common.chip_id = id;
 +
 +	/* Set doorbell size */
 +	bp->db_size = (1 << BNX2X_DB_SHIFT);
 +
 +	if (!CHIP_IS_E1x(bp)) {
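 +		/* Bit 0 of the override register says whether the override
 +		 * is valid: if it is clear, fall back to the PORT4MODE_EN
 +		 * strap, otherwise bit 1 holds the overridden port mode.
 +		 */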
 +		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
 +		if ((val & 1) == 0)
 +			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
 +		else
 +			val = (val >> 1) & 1;
 +		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
 +						       "2_PORT_MODE");
 +		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
 +						 CHIP_2_PORT_MODE;
 +
 +		if (CHIP_MODE_IS_4_PORT(bp))
 +			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
 +		else
 +			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
 +	} else {
 +		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
 +		bp->pfid = bp->pf_num;			/* 0..7 */
 +	}
 +
 +	bp->link_params.chip_id = bp->common.chip_id;
 +	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
 +
 +	val = (REG_RD(bp, 0x2874) & 0x55);
 +	if ((bp->common.chip_id & 0x1) ||
 +	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
 +		bp->flags |= ONE_PORT_FLAG;
 +		BNX2X_DEV_INFO("single port device\n");
 +	}
 +
 +	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
 +	bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
 +				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
 +	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
 +		       bp->common.flash_size, bp->common.flash_size);
 +
 +	bnx2x_init_shmem(bp);
 +
 +	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
 +					MISC_REG_GENERIC_CR_1 :
 +					MISC_REG_GENERIC_CR_0));
 +
 +	bp->link_params.shmem_base = bp->common.shmem_base;
 +	bp->link_params.shmem2_base = bp->common.shmem2_base;
 +	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
 +		       bp->common.shmem_base, bp->common.shmem2_base);
 +
 +	if (!bp->common.shmem_base) {
 +		BNX2X_DEV_INFO("MCP not active\n");
 +		bp->flags |= NO_MCP_FLAG;
 +		return;
 +	}
 +
 +	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
 +	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
 +
 +	bp->link_params.hw_led_mode = ((bp->common.hw_config &
 +					SHARED_HW_CFG_LED_MODE_MASK) >>
 +				       SHARED_HW_CFG_LED_MODE_SHIFT);
 +
 +	bp->link_params.feature_config_flags = 0;
 +	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
 +	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
 +		bp->link_params.feature_config_flags |=
 +				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
 +	else
 +		bp->link_params.feature_config_flags &=
 +				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
 +
 +	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
 +	bp->common.bc_ver = val;
 +	BNX2X_DEV_INFO("bc_ver %X\n", val);
 +	if (val < BNX2X_BC_VER) {
 +		/* for now only warn;
 +		 * later we might need to enforce this */
 +		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
 +			  "please upgrade BC\n", BNX2X_BC_VER, val);
 +	}
 +	bp->link_params.feature_config_flags |=
 +				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
 +				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
 +
 +	bp->link_params.feature_config_flags |=
 +		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
 +		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
 +
 +	bp->link_params.feature_config_flags |=
 +		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 +		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
 +
 +	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
 +	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
 +
 +	BNX2X_DEV_INFO("%sWoL capable\n",
 +		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
 +
 +	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
 +	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
 +	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
 +	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
 +
 +	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
 +		 val, val2, val3, val4);
 +}
 +
 +#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
 +#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
 +
 +static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 +{
 +	int pfid = BP_FUNC(bp);
 +	int igu_sb_id;
 +	u32 val;
 +	u8 fid, igu_sb_cnt = 0;
 +
 +	bp->igu_base_sb = 0xff;
 +	if (CHIP_INT_MODE_IS_BC(bp)) {
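 +		/* In backward-compatible mode the SB ids follow a fixed
 +		 * layout, so they are computed directly here; in normal
 +		 * mode the IGU CAM is scanned instead (below).
 +		 */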
 +		int vn = BP_VN(bp);
 +		igu_sb_cnt = bp->igu_sb_cnt;
 +		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
 +			FP_SB_MAX_E1x;
 +
 +		bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
 +			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
 +
 +		return;
 +	}
 +
 +	/* IGU in normal mode - read CAM */
 +	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
 +	     igu_sb_id++) {
 +		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
 +		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
 +			continue;
 +		fid = IGU_FID(val);
 +		if ((fid & IGU_FID_ENCODE_IS_PF)) {
 +			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
 +				continue;
 +			if (IGU_VEC(val) == 0)
 +				/* default status block */
 +				bp->igu_dsb_id = igu_sb_id;
 +			else {
 +				if (bp->igu_base_sb == 0xff)
 +					bp->igu_base_sb = igu_sb_id;
 +				igu_sb_cnt++;
 +			}
 +		}
 +	}
 +
 +#ifdef CONFIG_PCI_MSI
 +	/*
 +	 * It's expected that the number of CAM entries for this function is
 +	 * equal
 +	 * to the number evaluated based on the MSI-X table size. We want a
 +	 * harsh warning if these values are different!
 +	 */
 +	WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
 +#endif
 +
 +	if (igu_sb_cnt == 0)
 +		BNX2X_ERR("CAM configuration error\n");
 +}
 +
 +static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
 +						    u32 switch_cfg)
 +{
 +	int cfg_size = 0, idx, port = BP_PORT(bp);
 +
 +	/* Aggregation of supported attributes of all external phys */
 +	bp->port.supported[0] = 0;
 +	bp->port.supported[1] = 0;
 +	switch (bp->link_params.num_phys) {
 +	case 1:
 +		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
 +		cfg_size = 1;
 +		break;
 +	case 2:
 +		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
 +		cfg_size = 1;
 +		break;
 +	case 3:
 +		if (bp->link_params.multi_phy_config &
 +		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
 +			bp->port.supported[1] =
 +				bp->link_params.phy[EXT_PHY1].supported;
 +			bp->port.supported[0] =
 +				bp->link_params.phy[EXT_PHY2].supported;
 +		} else {
 +			bp->port.supported[0] =
 +				bp->link_params.phy[EXT_PHY1].supported;
 +			bp->port.supported[1] =
 +				bp->link_params.phy[EXT_PHY2].supported;
 +		}
 +		cfg_size = 2;
 +		break;
 +	}
 +
 +	if (!(bp->port.supported[0] || bp->port.supported[1])) {
 +		BNX2X_ERR("NVRAM config error. BAD phy config. "
 +			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
 +			   SHMEM_RD(bp,
 +			   dev_info.port_hw_config[port].external_phy_config),
 +			   SHMEM_RD(bp,
 +			   dev_info.port_hw_config[port].external_phy_config2));
 +		return;
 +	}
 +
 +	if (CHIP_IS_E3(bp))
 +		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
 +	else {
 +		switch (switch_cfg) {
 +		case SWITCH_CFG_1G:
 +			bp->port.phy_addr = REG_RD(
 +				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
 +			break;
 +		case SWITCH_CFG_10G:
 +			bp->port.phy_addr = REG_RD(
 +				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
 +			break;
 +		default:
 +			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
 +				  bp->port.link_config[0]);
 +			return;
 +		}
 +	}
 +	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
 +	/* mask what we support according to speed_cap_mask per configuration */
 +	for (idx = 0; idx < cfg_size; idx++) {
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
 +			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
 +			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
 +			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
 +			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
 +			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
 +						     SUPPORTED_1000baseT_Full);
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
 +			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
 +
 +		if (!(bp->link_params.speed_cap_mask[idx] &
 +					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
 +			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
 +
 +	}
 +
 +	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
 +		       bp->port.supported[1]);
 +}
 +
 +static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 +{
 +	u32 link_config, idx, cfg_size = 0;
 +	bp->port.advertising[0] = 0;
 +	bp->port.advertising[1] = 0;
 +	switch (bp->link_params.num_phys) {
 +	case 1:
 +	case 2:
 +		cfg_size = 1;
 +		break;
 +	case 3:
 +		cfg_size = 2;
 +		break;
 +	}
 +	for (idx = 0; idx < cfg_size; idx++) {
 +		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
 +		link_config = bp->port.link_config[idx];
 +		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
 +		case PORT_FEATURE_LINK_SPEED_AUTO:
 +			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_AUTO_NEG;
 +				bp->port.advertising[idx] |=
 +					bp->port.supported[idx];
 +			} else {
 +				/* force 10G, no AN */
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_10000;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_10000baseT_Full |
 +					 ADVERTISED_FIBRE);
 +				continue;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_10M_FULL:
 +			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_10;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_10baseT_Full |
 +					 ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +					    "Invalid link_config 0x%x"
 +					    "  speed_cap_mask 0x%x\n",
 +					    link_config,
 +				    bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_10M_HALF:
 +			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_10;
 +				bp->link_params.req_duplex[idx] =
 +					DUPLEX_HALF;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_10baseT_Half |
 +					 ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +					    "Invalid link_config 0x%x"
 +					    "  speed_cap_mask 0x%x\n",
 +					    link_config,
 +					  bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_100M_FULL:
 +			if (bp->port.supported[idx] &
 +			    SUPPORTED_100baseT_Full) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_100;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_100baseT_Full |
 +					 ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +					    "Invalid link_config 0x%x"
 +					    "  speed_cap_mask 0x%x\n",
 +					    link_config,
 +					  bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_100M_HALF:
 +			if (bp->port.supported[idx] &
 +			    SUPPORTED_100baseT_Half) {
 +				bp->link_params.req_line_speed[idx] =
 +								SPEED_100;
 +				bp->link_params.req_duplex[idx] =
 +								DUPLEX_HALF;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_100baseT_Half |
 +					 ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +				    "Invalid link_config 0x%x"
 +				    "  speed_cap_mask 0x%x\n",
 +				    link_config,
 +				    bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_1G:
 +			if (bp->port.supported[idx] &
 +			    SUPPORTED_1000baseT_Full) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_1000;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_1000baseT_Full |
 +					 ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +				    "Invalid link_config 0x%x"
 +				    "  speed_cap_mask 0x%x\n",
 +				    link_config,
 +				    bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_2_5G:
 +			if (bp->port.supported[idx] &
 +			    SUPPORTED_2500baseX_Full) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_2500;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_2500baseX_Full |
 +						ADVERTISED_TP);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +				    "Invalid link_config 0x%x"
 +				    "  speed_cap_mask 0x%x\n",
 +				    link_config,
 +				    bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +
 +		case PORT_FEATURE_LINK_SPEED_10G_CX4:
 +			if (bp->port.supported[idx] &
 +			    SUPPORTED_10000baseT_Full) {
 +				bp->link_params.req_line_speed[idx] =
 +					SPEED_10000;
 +				bp->port.advertising[idx] |=
 +					(ADVERTISED_10000baseT_Full |
 +						ADVERTISED_FIBRE);
 +			} else {
 +				BNX2X_ERR("NVRAM config error. "
 +				    "Invalid link_config 0x%x"
 +				    "  speed_cap_mask 0x%x\n",
 +				    link_config,
 +				    bp->link_params.speed_cap_mask[idx]);
 +				return;
 +			}
 +			break;
 +		case PORT_FEATURE_LINK_SPEED_20G:
 +			bp->link_params.req_line_speed[idx] = SPEED_20000;
 +
 +			break;
 +		default:
 +			BNX2X_ERR("NVRAM config error. "
 +				  "BAD link speed link_config 0x%x\n",
 +				  link_config);
 +				bp->link_params.req_line_speed[idx] =
 +							SPEED_AUTO_NEG;
 +				bp->port.advertising[idx] =
 +						bp->port.supported[idx];
 +			break;
 +		}
 +
 +		bp->link_params.req_flow_ctrl[idx] = (link_config &
 +					 PORT_FEATURE_FLOW_CONTROL_MASK);
 +		if ((bp->link_params.req_flow_ctrl[idx] ==
 +		     BNX2X_FLOW_CTRL_AUTO) &&
 +		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
 +			bp->link_params.req_flow_ctrl[idx] =
 +				BNX2X_FLOW_CTRL_NONE;
 +		}
 +
 +		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
 +			       " 0x%x advertising 0x%x\n",
 +			       bp->link_params.req_line_speed[idx],
 +			       bp->link_params.req_duplex[idx],
 +			       bp->link_params.req_flow_ctrl[idx],
 +			       bp->port.advertising[idx]);
 +	}
 +}
 +
 +static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
 +{
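 +	/* mac_hi holds the two high-order bytes of the MAC and mac_lo the
 +	 * four low-order ones; convert both to big-endian so the buffer
 +	 * ends up in network byte order.
 +	 */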
 +	mac_hi = cpu_to_be16(mac_hi);
 +	mac_lo = cpu_to_be32(mac_lo);
 +	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
 +	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
 +}
 +
 +static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	u32 config;
 +	u32 ext_phy_type, ext_phy_config;
 +
 +	bp->link_params.bp = bp;
 +	bp->link_params.port = port;
 +
 +	bp->link_params.lane_config =
 +		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
 +
 +	bp->link_params.speed_cap_mask[0] =
 +		SHMEM_RD(bp,
 +			 dev_info.port_hw_config[port].speed_capability_mask);
 +	bp->link_params.speed_cap_mask[1] =
 +		SHMEM_RD(bp,
 +			 dev_info.port_hw_config[port].speed_capability_mask2);
 +	bp->port.link_config[0] =
 +		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
 +
 +	bp->port.link_config[1] =
 +		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
 +
 +	bp->link_params.multi_phy_config =
 +		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
 +	/* If the device is capable of WoL, set the default state according
 +	 * to the HW
 +	 */
 +	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
 +	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
 +		   (config & PORT_FEATURE_WOL_ENABLED));
 +
 +	BNX2X_DEV_INFO("lane_config 0x%08x  "
 +		       "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
 +		       bp->link_params.lane_config,
 +		       bp->link_params.speed_cap_mask[0],
 +		       bp->port.link_config[0]);
 +
 +	bp->link_params.switch_cfg = (bp->port.link_config[0] &
 +				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
 +	bnx2x_phy_probe(&bp->link_params);
 +	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
 +
 +	bnx2x_link_settings_requested(bp);
 +
 +	/*
 +	 * If connected directly, work with the internal PHY, otherwise, work
 +	 * with the external PHY
 +	 */
 +	ext_phy_config =
 +		SHMEM_RD(bp,
 +			 dev_info.port_hw_config[port].external_phy_config);
 +	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 +	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
 +		bp->mdio.prtad = bp->port.phy_addr;
 +
 +	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
 +		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 +		bp->mdio.prtad =
 +			XGXS_EXT_PHY_ADDR(ext_phy_config);
 +
 +	/*
 +	 * Check if a HW lock is required to access the MDC/MDIO bus to the
 +	 * PHY(s). In MF mode, it is set to cover the self-test cases.
 +	 */
 +	if (IS_MF(bp))
 +		bp->port.need_hw_lock = 1;
 +	else
 +		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
 +							bp->common.shmem_base,
 +							bp->common.shmem2_base);
 +}
 +
 +#ifdef BCM_CNIC
 +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
 +{
 +	int port = BP_PORT(bp);
 +	int func = BP_ABS_FUNC(bp);
 +
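 +	/* The license values are stored XOR-encoded with
 +	 * FW_ENCODE_32BIT_PATTERN; XORing again here decodes them.
 +	 */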
 +	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +				drv_lic_key[port].max_iscsi_conn);
 +	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +				drv_lic_key[port].max_fcoe_conn);
 +
 +	/* Get the number of maximum allowed iSCSI and FCoE connections */
 +	bp->cnic_eth_dev.max_iscsi_conn =
 +		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
 +		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 +
 +	bp->cnic_eth_dev.max_fcoe_conn =
 +		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 +		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 +
 +	/* Read the WWN: */
 +	if (!IS_MF(bp)) {
 +		/* Port info */
 +		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
 +			SHMEM_RD(bp,
 +				dev_info.port_hw_config[port].
 +				 fcoe_wwn_port_name_upper);
 +		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
 +			SHMEM_RD(bp,
 +				dev_info.port_hw_config[port].
 +				 fcoe_wwn_port_name_lower);
 +
 +		/* Node info */
 +		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
 +			SHMEM_RD(bp,
 +				dev_info.port_hw_config[port].
 +				 fcoe_wwn_node_name_upper);
 +		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 +			SHMEM_RD(bp,
 +				dev_info.port_hw_config[port].
 +				 fcoe_wwn_node_name_lower);
 +	} else if (!IS_MF_SD(bp)) {
 +		u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 +
 +		/*
 +		 * Read the WWN info only if the FCoE feature is enabled for
 +		 * this function.
 +		 */
 +		if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 +			/* Port info */
 +			bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
 +				MF_CFG_RD(bp, func_ext_config[func].
 +						fcoe_wwn_port_name_upper);
 +			bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
 +				MF_CFG_RD(bp, func_ext_config[func].
 +						fcoe_wwn_port_name_lower);
 +
 +			/* Node info */
 +			bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
 +				MF_CFG_RD(bp, func_ext_config[func].
 +						fcoe_wwn_node_name_upper);
 +			bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 +				MF_CFG_RD(bp, func_ext_config[func].
 +						fcoe_wwn_node_name_lower);
 +		}
 +	}
 +
 +	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
 +		       bp->cnic_eth_dev.max_iscsi_conn,
 +		       bp->cnic_eth_dev.max_fcoe_conn);
 +
 +	/*
 +	 * If maximum allowed number of connections is zero -
 +	 * disable the feature.
 +	 */
 +	if (!bp->cnic_eth_dev.max_iscsi_conn)
 +		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +	if (!bp->cnic_eth_dev.max_fcoe_conn)
 +		bp->flags |= NO_FCOE_FLAG;
 +}
 +#endif
 +
 +static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 +{
 +	u32 val, val2;
 +	int func = BP_ABS_FUNC(bp);
 +	int port = BP_PORT(bp);
 +#ifdef BCM_CNIC
 +	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 +	u8 *fip_mac = bp->fip_mac;
 +#endif
 +
 +	/* Zero primary MAC configuration */
 +	memset(bp->dev->dev_addr, 0, ETH_ALEN);
 +
 +	if (BP_NOMCP(bp)) {
 +		BNX2X_ERROR("warning: random MAC workaround active\n");
 +		random_ether_addr(bp->dev->dev_addr);
 +	} else if (IS_MF(bp)) {
 +		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
 +		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
 +		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
 +		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
 +			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or
 +		 * FCoE MAC then the corresponding feature should be disabled.
 +		 */
 +		if (IS_MF_SI(bp)) {
 +			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 +			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
 +				val2 = MF_CFG_RD(bp, func_ext_config[func].
 +						     iscsi_mac_addr_upper);
 +				val = MF_CFG_RD(bp, func_ext_config[func].
 +						    iscsi_mac_addr_lower);
 +				bnx2x_set_mac_buf(iscsi_mac, val, val2);
 +				BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
 +					       iscsi_mac);
 +			} else
 +				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 +				val2 = MF_CFG_RD(bp, func_ext_config[func].
 +						     fcoe_mac_addr_upper);
 +				val = MF_CFG_RD(bp, func_ext_config[func].
 +						    fcoe_mac_addr_lower);
 +				bnx2x_set_mac_buf(fip_mac, val, val2);
 +				BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
 +					       fip_mac);
 +
 +			} else
 +				bp->flags |= NO_FCOE_FLAG;
 +		}
 +#endif
 +	} else {
 +		/* in SF read MACs from port configuration */
 +		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 +		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 +		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +				    iscsi_mac_upper);
 +		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +				   iscsi_mac_lower);
 +		bnx2x_set_mac_buf(iscsi_mac, val, val2);
 +
 +		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +				    fcoe_fip_mac_upper);
 +		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +				   fcoe_fip_mac_lower);
 +		bnx2x_set_mac_buf(fip_mac, val, val2);
 +#endif
 +	}
 +
 +	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 +	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 +
 +#ifdef BCM_CNIC
 +	/* Set the FCoE MAC in MF_SD mode */
 +	if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
 +		memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
 +
 +	/* Disable iSCSI if the MAC configuration is invalid. */
 +	if (!is_valid_ether_addr(iscsi_mac)) {
 +		bp->flags |= NO_ISCSI_FLAG;
 +		memset(iscsi_mac, 0, ETH_ALEN);
 +	}
 +
 +	/* Disable FCoE if the MAC configuration is invalid. */
 +	if (!is_valid_ether_addr(fip_mac)) {
 +		bp->flags |= NO_FCOE_FLAG;
 +		memset(bp->fip_mac, 0, ETH_ALEN);
 +	}
 +#endif
 +
 +	if (!is_valid_ether_addr(bp->dev->dev_addr))
 +		dev_err(&bp->pdev->dev,
 +			"bad Ethernet MAC address configuration: "
 +			"%pM, change it manually before bringing up "
 +			"the appropriate network interface\n",
 +			bp->dev->dev_addr);
 +}
 +
 +static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 +{
 +	int /*abs*/func = BP_ABS_FUNC(bp);
 +	int vn;
 +	u32 val = 0;
 +	int rc = 0;
 +
 +	bnx2x_get_common_hwinfo(bp);
 +
 +	/*
 +	 * initialize IGU parameters
 +	 */
 +	if (CHIP_IS_E1x(bp)) {
 +		bp->common.int_block = INT_BLOCK_HC;
 +
 +		bp->igu_dsb_id = DEF_SB_IGU_ID;
 +		bp->igu_base_sb = 0;
 +	} else {
 +		bp->common.int_block = INT_BLOCK_IGU;
 +
 +		/* do not allow device reset during IGU info processing */
 +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +
 +		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 +
 +		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
 +			int tout = 5000;
 +
 +			BNX2X_DEV_INFO("FORCING Normal Mode\n");
 +
 +			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
 +			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
 +			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
 +
 +			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
 +				tout--;
 +				usleep_range(1000, 1000);
 +			}
 +
 +			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
 +				dev_err(&bp->pdev->dev,
 +					"FORCING Normal Mode failed!!!\n");
 +				return -EPERM;
 +			}
 +		}
 +
 +		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
 +			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
 +			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
 +		} else
 +			BNX2X_DEV_INFO("IGU Normal Mode\n");
 +
 +		bnx2x_get_igu_cam_info(bp);
 +
 +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +	}
 +
 +	/*
 +	 * Set the base FW non-default (fast path) status block id; this value
 +	 * is used to initialize the fw_sb_id saved on the fp/queue structure,
 +	 * which determines the id used by the FW.
 +	 */
 +	if (CHIP_IS_E1x(bp))
 +		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
 +	else /*
 +	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
 +	      * the same queue are indicated on the same IGU SB). So we prefer
 +	      * FW and IGU SBs to be the same value.
 +	      */
 +		bp->base_fw_ndsb = bp->igu_base_sb;
 +
 +	BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
 +		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
 +		       bp->igu_sb_cnt, bp->base_fw_ndsb);
 +
 +	/*
 +	 * Initialize MF configuration
 +	 */
 +
 +	bp->mf_ov = 0;
 +	bp->mf_mode = 0;
 +	vn = BP_VN(bp);
 +
 +	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 +		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
 +			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
 +			      (u32)offsetof(struct shmem2_region, mf_cfg_addr));
 +
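 +		/* The MF config block either lives at the address published
 +		 * in shmem2 or - presumably with older bootcode - right
 +		 * after the function mailboxes in shmem.
 +		 */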
 +		if (SHMEM2_HAS(bp, mf_cfg_addr))
 +			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
 +		else
 +			bp->common.mf_cfg_base = bp->common.shmem_base +
 +				offsetof(struct shmem_region, func_mb) +
 +				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
 +		/*
 +		 * get mf configuration:
 +		 * 1. existence of MF configuration
 +		 * 2. MAC address must be legal (check only upper bytes)
 +		 *    for Switch-Independent mode;
 +		 *    OVLAN must be legal for Switch-Dependent mode
 +		 * 3. SF_MODE configures specific MF mode
 +		 */
 +		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 +			/* get mf configuration */
 +			val = SHMEM_RD(bp,
 +				       dev_info.shared_feature_config.config);
 +			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
 +
 +			switch (val) {
 +			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
 +				val = MF_CFG_RD(bp, func_mf_config[func].
 +						mac_upper);
 +				/* check for legal mac (upper bytes)*/
 +				if (val != 0xffff) {
 +					bp->mf_mode = MULTI_FUNCTION_SI;
 +					bp->mf_config[vn] = MF_CFG_RD(bp,
 +						   func_mf_config[func].config);
 +				} else
 +					BNX2X_DEV_INFO("illegal MAC address "
 +						       "for SI\n");
 +				break;
 +			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 +				/* get OV configuration */
 +				val = MF_CFG_RD(bp,
 +					func_mf_config[FUNC_0].e1hov_tag);
 +				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
 +
 +				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 +					bp->mf_mode = MULTI_FUNCTION_SD;
 +					bp->mf_config[vn] = MF_CFG_RD(bp,
 +						func_mf_config[func].config);
 +				} else
 +					BNX2X_DEV_INFO("illegal OV for SD\n");
 +				break;
 +			default:
 +				/* Unknown configuration: reset mf_config */
 +				bp->mf_config[vn] = 0;
 +				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
 +			}
 +		}
 +
 +		BNX2X_DEV_INFO("%s function mode\n",
 +			       IS_MF(bp) ? "multi" : "single");
 +
 +		switch (bp->mf_mode) {
 +		case MULTI_FUNCTION_SD:
 +			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
 +			      FUNC_MF_CFG_E1HOV_TAG_MASK;
 +			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 +				bp->mf_ov = val;
 +				bp->path_has_ovlan = true;
 +
 +				BNX2X_DEV_INFO("MF OV for func %d is %d "
 +					       "(0x%04x)\n", func, bp->mf_ov,
 +					       bp->mf_ov);
 +			} else {
 +				dev_err(&bp->pdev->dev,
 +					"No valid MF OV for func %d, "
 +					"aborting\n", func);
 +				return -EPERM;
 +			}
 +			break;
 +		case MULTI_FUNCTION_SI:
 +			BNX2X_DEV_INFO("func %d is in MF "
 +				       "switch-independent mode\n", func);
 +			break;
 +		default:
 +			if (vn) {
 +				dev_err(&bp->pdev->dev,
 +					"VN %d is in a single function mode, "
 +					"aborting\n", vn);
 +				return -EPERM;
 +			}
 +			break;
 +		}
 +
 +		/* Check if the other port on the path needs an ovlan:
 +		 * since the MF configuration is shared between ports, the
 +		 * only possible mixed modes are
 +		 * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
 +		 */
 +		if (CHIP_MODE_IS_4_PORT(bp) &&
 +		    !bp->path_has_ovlan &&
 +		    !IS_MF(bp) &&
 +		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 +			u8 other_port = !BP_PORT(bp);
 +			u8 other_func = BP_PATH(bp) + 2*other_port;
 +			val = MF_CFG_RD(bp,
 +					func_mf_config[other_func].e1hov_tag);
 +			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
 +				bp->path_has_ovlan = true;
 +		}
 +	}
 +
 +	/* adjust igu_sb_cnt to MF for E1x */
 +	if (CHIP_IS_E1x(bp) && IS_MF(bp))
 +		bp->igu_sb_cnt /= E1HVN_MAX;
 +
 +	/* port info */
 +	bnx2x_get_port_hwinfo(bp);
 +
 +	/* Get MAC addresses */
 +	bnx2x_get_mac_hwinfo(bp);
 +
 +#ifdef BCM_CNIC
 +	bnx2x_get_cnic_info(bp);
 +#endif
 +
 +	/* Get current FW pulse sequence */
 +	if (!BP_NOMCP(bp)) {
 +		int mb_idx = BP_FW_MB_IDX(bp);
 +
 +		bp->fw_drv_pulse_wr_seq =
 +				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
 +				 DRV_PULSE_SEQ_MASK);
 +		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
 +	}
 +
 +	return rc;
 +}
 +
 +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 +{
 +	int cnt, i, block_end, rodi;
 +	char vpd_data[BNX2X_VPD_LEN+1];
 +	char str_id_reg[VENDOR_ID_LEN+1];
 +	char str_id_cap[VENDOR_ID_LEN+1];
 +	u8 len;
 +
 +	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
 +	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 +
 +	if (cnt < BNX2X_VPD_LEN)
 +		goto out_not_found;
 +
 +	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
 +			     PCI_VPD_LRDT_RO_DATA);
 +	if (i < 0)
 +		goto out_not_found;
 +
 +	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
 +		    pci_vpd_lrdt_size(&vpd_data[i]);
 +
 +	i += PCI_VPD_LRDT_TAG_SIZE;
 +
 +	if (block_end > BNX2X_VPD_LEN)
 +		goto out_not_found;
 +
 +	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 +				   PCI_VPD_RO_KEYWORD_MFR_ID);
 +	if (rodi < 0)
 +		goto out_not_found;
 +
 +	len = pci_vpd_info_field_size(&vpd_data[rodi]);
 +
 +	if (len != VENDOR_ID_LEN)
 +		goto out_not_found;
 +
 +	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 +
 +	/* vendor specific info */
 +	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
 +	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
 +	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
 +	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
 +
 +		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 +						PCI_VPD_RO_KEYWORD_VENDOR0);
 +		if (rodi >= 0) {
 +			len = pci_vpd_info_field_size(&vpd_data[rodi]);
 +
 +			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 +
 +			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
 +				memcpy(bp->fw_ver, &vpd_data[rodi], len);
 +				bp->fw_ver[len] = ' ';
 +			}
 +		}
 +		return;
 +	}
 +out_not_found:
 +	return;
 +}
 +
 +static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
 +{
 +	u32 flags = 0;
 +
 +	if (CHIP_REV_IS_FPGA(bp))
 +		SET_FLAGS(flags, MODE_FPGA);
 +	else if (CHIP_REV_IS_EMUL(bp))
 +		SET_FLAGS(flags, MODE_EMUL);
 +	else
 +		SET_FLAGS(flags, MODE_ASIC);
 +
 +	if (CHIP_MODE_IS_4_PORT(bp))
 +		SET_FLAGS(flags, MODE_PORT4);
 +	else
 +		SET_FLAGS(flags, MODE_PORT2);
 +
 +	if (CHIP_IS_E2(bp))
 +		SET_FLAGS(flags, MODE_E2);
 +	else if (CHIP_IS_E3(bp)) {
 +		SET_FLAGS(flags, MODE_E3);
 +		if (CHIP_REV(bp) == CHIP_REV_Ax)
 +			SET_FLAGS(flags, MODE_E3_A0);
 +		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
 +			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
 +	}
 +
 +	if (IS_MF(bp)) {
 +		SET_FLAGS(flags, MODE_MF);
 +		switch (bp->mf_mode) {
 +		case MULTI_FUNCTION_SD:
 +			SET_FLAGS(flags, MODE_MF_SD);
 +			break;
 +		case MULTI_FUNCTION_SI:
 +			SET_FLAGS(flags, MODE_MF_SI);
 +			break;
 +		}
 +	} else
 +		SET_FLAGS(flags, MODE_SF);
 +
 +#if defined(__LITTLE_ENDIAN)
 +	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
 +#else /*(__BIG_ENDIAN)*/
 +	SET_FLAGS(flags, MODE_BIG_ENDIAN);
 +#endif
 +	INIT_MODE_FLAGS(bp) = flags;
 +}
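 +
 +/*
 + * For example, an E2 ASIC in 2-port single-function mode ends up with
 + * MODE_ASIC | MODE_PORT2 | MODE_E2 | MODE_SF, plus MODE_LITTLE_ENDIAN
 + * or MODE_BIG_ENDIAN depending on the host CPU.
 + */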
 +
 +static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 +{
 +	int func;
 +	int timer_interval;
 +	int rc;
 +
 +	mutex_init(&bp->port.phy_mutex);
 +	mutex_init(&bp->fw_mb_mutex);
 +	spin_lock_init(&bp->stats_lock);
 +#ifdef BCM_CNIC
 +	mutex_init(&bp->cnic_mutex);
 +#endif
 +
 +	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 +	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
 +	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
 +	rc = bnx2x_get_hwinfo(bp);
 +	if (rc)
 +		return rc;
 +
 +	bnx2x_set_modes_bitmap(bp);
 +
 +	rc = bnx2x_alloc_mem_bp(bp);
 +	if (rc)
 +		return rc;
 +
 +	bnx2x_read_fwinfo(bp);
 +
 +	func = BP_FUNC(bp);
 +
 +	/* need to reset chip if undi was active */
 +	if (!BP_NOMCP(bp))
 +		bnx2x_undi_unload(bp);
 +
 +	/* init fw_seq after undi_unload! */
 +	if (!BP_NOMCP(bp)) {
 +		bp->fw_seq =
 +			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 +			 DRV_MSG_SEQ_NUMBER_MASK);
 +		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
 +	}
 +
 +	if (CHIP_REV_IS_FPGA(bp))
 +		dev_err(&bp->pdev->dev, "FPGA detected\n");
 +
 +	if (BP_NOMCP(bp) && (func == 0))
 +		dev_err(&bp->pdev->dev, "MCP disabled, "
 +					"must load devices in order!\n");
 +
 +	bp->multi_mode = multi_mode;
 +
 +	/* Set TPA flags */
 +	if (disable_tpa) {
 +		bp->flags &= ~TPA_ENABLE_FLAG;
 +		bp->dev->features &= ~NETIF_F_LRO;
 +	} else {
 +		bp->flags |= TPA_ENABLE_FLAG;
 +		bp->dev->features |= NETIF_F_LRO;
 +	}
 +	bp->disable_tpa = disable_tpa;
 +
 +	if (CHIP_IS_E1(bp))
 +		bp->dropless_fc = 0;
 +	else
 +		bp->dropless_fc = dropless_fc;
 +
 +	bp->mrrs = mrrs;
 +
 +	bp->tx_ring_size = MAX_TX_AVAIL;
 +
 +	/* make sure that the numbers are in the right granularity */
 +	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
 +	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
 +
 +	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
 +	bp->current_interval = (poll ? poll : timer_interval);
 +
 +	init_timer(&bp->timer);
 +	bp->timer.expires = jiffies + bp->current_interval;
 +	bp->timer.data = (unsigned long) bp;
 +	bp->timer.function = bnx2x_timer;
 +
 +	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 +	bnx2x_dcbx_init_params(bp);
 +
 +#ifdef BCM_CNIC
 +	if (CHIP_IS_E1x(bp))
 +		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
 +	else
 +		bp->cnic_base_cl_id = FP_SB_MAX_E2;
 +#endif
 +
 +	/* multiple tx priority */
 +	if (CHIP_IS_E1x(bp))
 +		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
 +	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
 +		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
 +	if (CHIP_IS_E3B0(bp))
 +		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 +
 +	return rc;
 +}
 +
 +
 +/****************************************************************************
 +* General service functions
 +****************************************************************************/
 +
 +/*
 + * net_device service functions
 + */
 +
 +/* called with rtnl_lock */
 +static int bnx2x_open(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	bool global = false;
 +	int other_engine = BP_PATH(bp) ? 0 : 1;
 +	u32 other_load_counter, load_counter;
 +
 +	netif_carrier_off(dev);
 +
 +	bnx2x_set_power_state(bp, PCI_D0);
 +
 +	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
 +	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
 +
 +	/*
 +	 * If parity occurred during the unload, then attentions
 +	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
 +	 * want the first function loaded on the current engine to
 +	 * complete the recovery.
 +	 */
 +	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
 +	    bnx2x_chk_parity_attn(bp, &global, true))
 +		do {
 +			/*
 +			 * If there are attentions and they are in global
 +			 * blocks, set the GLOBAL_RESET bit regardless of
 +			 * whether it will be this function that completes
 +			 * the recovery or not.
 +			 */
 +			if (global)
 +				bnx2x_set_reset_global(bp);
 +
 +			/*
 +			 * Only the first function on the current engine should
 +			 * try to recover in open. In case of attentions in
 +			 * global blocks only the first in the chip should try
 +			 * to recover.
 +			 */
 +			if ((!load_counter &&
 +			     (!global || !other_load_counter)) &&
 +			    bnx2x_trylock_leader_lock(bp) &&
 +			    !bnx2x_leader_reset(bp)) {
 +				netdev_info(bp->dev, "Recovered in open\n");
 +				break;
 +			}
 +
 +			/* recovery has failed... */
 +			bnx2x_set_power_state(bp, PCI_D3hot);
 +			bp->recovery_state = BNX2X_RECOVERY_FAILED;
 +
 +			netdev_err(bp->dev, "Recovery flow hasn't been properly"
 +			" completed yet. Try again later. If you still see this"
 +			" message after a few retries then power cycle is"
 +			" required.\n");
 +
 +			return -EAGAIN;
 +		} while (0);
 +
 +	bp->recovery_state = BNX2X_RECOVERY_DONE;
 +	return bnx2x_nic_load(bp, LOAD_OPEN);
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_close(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	/* Unload the driver, release IRQs */
 +	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +
 +	/* Power off */
 +	bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +	return 0;
 +}
 +
 +static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 +					 struct bnx2x_mcast_ramrod_params *p)
 +{
 +	int mc_count = netdev_mc_count(bp->dev);
 +	struct bnx2x_mcast_list_elem *mc_mac =
 +		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
 +	struct netdev_hw_addr *ha;
 +
 +	if (!mc_mac)
 +		return -ENOMEM;
 +
 +	INIT_LIST_HEAD(&p->mcast_list);
 +
 +	netdev_for_each_mc_addr(ha, bp->dev) {
 +		mc_mac->mac = bnx2x_mc_addr(ha);
 +		list_add_tail(&mc_mac->link, &p->mcast_list);
 +		mc_mac++;
 +	}
 +
 +	p->mcast_list_len = mc_count;
 +
 +	return 0;
 +}
 +
 +static inline void bnx2x_free_mcast_macs_list(
 +	struct bnx2x_mcast_ramrod_params *p)
 +{
 +	struct bnx2x_mcast_list_elem *mc_mac =
 +		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
 +				 link);
 +
 +	WARN_ON(!mc_mac);
 +	kfree(mc_mac);
 +}
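 +
 +/*
 + * Note: the mcast list elements are carved out of the single array
 + * allocated in bnx2x_init_mcast_macs_list(), which is why kfree()ing
 + * the first list entry above releases the whole list.
 + */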
 +
 +/**
 + * bnx2x_set_uc_list - configure a new unicast MAC list.
 + *
 + * @bp: driver handle
 + *
 + * We will use zero (0) as a MAC type for these MACs.
 + */
 +static inline int bnx2x_set_uc_list(struct bnx2x *bp)
 +{
 +	int rc;
 +	struct net_device *dev = bp->dev;
 +	struct netdev_hw_addr *ha;
 +	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
 +	unsigned long ramrod_flags = 0;
 +
 +	/* First schedule a cleanup of the old configuration */
 +	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
 +	if (rc < 0) {
 +		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
 +		return rc;
 +	}
 +
 +	netdev_for_each_uc_addr(ha, dev) {
 +		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
 +				       BNX2X_UC_LIST_MAC, &ramrod_flags);
 +		if (rc < 0) {
 +			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
 +				  rc);
 +			return rc;
 +		}
 +	}
 +
 +	/* Execute the pending commands */
 +	__set_bit(RAMROD_CONT, &ramrod_flags);
 +	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
 +				 BNX2X_UC_LIST_MAC, &ramrod_flags);
 +}
 +
 +static inline int bnx2x_set_mc_list(struct bnx2x *bp)
 +{
 +	struct net_device *dev = bp->dev;
 +	struct bnx2x_mcast_ramrod_params rparam = {0};
 +	int rc = 0;
 +
 +	rparam.mcast_obj = &bp->mcast_obj;
 +
 +	/* first, clear all configured multicast MACs */
 +	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +	if (rc < 0) {
 +		BNX2X_ERR("Failed to clear multicast "
 +			  "configuration: %d\n", rc);
 +		return rc;
 +	}
 +
 +	/* then, configure a new MAC list */
 +	if (netdev_mc_count(dev)) {
 +		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
 +		if (rc) {
 +			BNX2X_ERR("Failed to create multicast MACs "
 +				  "list: %d\n", rc);
 +			return rc;
 +		}
 +
 +		/* Now add the new MACs */
 +		rc = bnx2x_config_mcast(bp, &rparam,
 +					BNX2X_MCAST_CMD_ADD);
 +		if (rc < 0)
 +			BNX2X_ERR("Failed to set a new multicast "
 +				  "configuration: %d\n", rc);
 +
 +		bnx2x_free_mcast_macs_list(&rparam);
 +	}
 +
 +	return rc;
 +}
 +
 +
 +/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
 +void bnx2x_set_rx_mode(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 +
 +	if (bp->state != BNX2X_STATE_OPEN) {
 +		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
 +		return;
 +	}
 +
 +	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 +
 +	if (dev->flags & IFF_PROMISC)
 +		rx_mode = BNX2X_RX_MODE_PROMISC;
 +	else if ((dev->flags & IFF_ALLMULTI) ||
 +		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
 +		  CHIP_IS_E1(bp)))
 +		rx_mode = BNX2X_RX_MODE_ALLMULTI;
 +	else {
 +		/* some multicasts */
 +		if (bnx2x_set_mc_list(bp) < 0)
 +			rx_mode = BNX2X_RX_MODE_ALLMULTI;
 +
 +		if (bnx2x_set_uc_list(bp) < 0)
 +			rx_mode = BNX2X_RX_MODE_PROMISC;
 +	}
 +
 +	bp->rx_mode = rx_mode;
 +
 +	/* Schedule the rx_mode command */
 +	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
 +		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 +		return;
 +	}
 +
 +	bnx2x_set_storm_rx_mode(bp);
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
 +			   int devad, u16 addr)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	u16 value;
 +	int rc;
 +
 +	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
 +	   prtad, devad, addr);
 +
 +	/* The HW expects different devad if CL22 is used */
 +	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
 +
 +	bnx2x_acquire_phy_lock(bp);
 +	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
 +	bnx2x_release_phy_lock(bp);
 +	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
 +
 +	if (!rc)
 +		rc = value;
 +	return rc;
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
 +			    u16 addr, u16 value)
 +{
 +	struct bnx2x *bp = netdev_priv(netdev);
 +	int rc;
 +
 +	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
 +			   " value 0x%x\n", prtad, devad, addr, value);
 +
 +	/* The HW expects different devad if CL22 is used */
 +	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
 +
 +	bnx2x_acquire_phy_lock(bp);
 +	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
 +	bnx2x_release_phy_lock(bp);
 +	return rc;
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	struct mii_ioctl_data *mdio = if_mii(ifr);
 +
 +	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
 +	   mdio->phy_id, mdio->reg_num, mdio->val_in);
 +
 +	if (!netif_running(dev))
 +		return -EAGAIN;
 +
 +	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void poll_bnx2x(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	disable_irq(bp->pdev->irq);
 +	bnx2x_interrupt(bp->pdev->irq, dev);
 +	enable_irq(bp->pdev->irq);
 +}
 +#endif
 +
 +static const struct net_device_ops bnx2x_netdev_ops = {
 +	.ndo_open		= bnx2x_open,
 +	.ndo_stop		= bnx2x_close,
 +	.ndo_start_xmit		= bnx2x_start_xmit,
 +	.ndo_select_queue	= bnx2x_select_queue,
 +	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
 +	.ndo_set_mac_address	= bnx2x_change_mac_addr,
 +	.ndo_validate_addr	= eth_validate_addr,
 +	.ndo_do_ioctl		= bnx2x_ioctl,
 +	.ndo_change_mtu		= bnx2x_change_mtu,
 +	.ndo_fix_features	= bnx2x_fix_features,
 +	.ndo_set_features	= bnx2x_set_features,
 +	.ndo_tx_timeout		= bnx2x_tx_timeout,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +	.ndo_poll_controller	= poll_bnx2x,
 +#endif
 +	.ndo_setup_tc		= bnx2x_setup_tc,
 +
 +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
 +	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
 +#endif
 +};
 +
 +static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
 +{
 +	struct device *dev = &bp->pdev->dev;
 +
 +	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
 +		bp->flags |= USING_DAC_FLAG;
 +		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
 +			dev_err(dev, "dma_set_coherent_mask failed, "
 +				     "aborting\n");
 +			return -EIO;
 +		}
 +	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
 +		dev_err(dev, "System does not support DMA, aborting\n");
 +		return -EIO;
 +	}
 +
 +	return 0;
 +}
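 +
 +/*
 + * A 64-bit streaming mask without a matching 64-bit coherent mask is
 + * treated as a hard error rather than a fallback to 32-bit DMA,
 + * presumably because a mixed 64/32-bit configuration is not one the
 + * driver is prepared to handle.
 + */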
 +
 +static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 +				    struct net_device *dev,
 +				    unsigned long board_type)
 +{
 +	struct bnx2x *bp;
 +	int rc;
 +
 +	SET_NETDEV_DEV(dev, &pdev->dev);
 +	bp = netdev_priv(dev);
 +
 +	bp->dev = dev;
 +	bp->pdev = pdev;
 +	bp->flags = 0;
 +	bp->pf_num = PCI_FUNC(pdev->devfn);
 +
 +	rc = pci_enable_device(pdev);
 +	if (rc) {
 +		dev_err(&bp->pdev->dev,
 +			"Cannot enable PCI device, aborting\n");
 +		goto err_out;
 +	}
 +
 +	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 +		dev_err(&bp->pdev->dev,
 +			"Cannot find PCI device base address, aborting\n");
 +		rc = -ENODEV;
 +		goto err_out_disable;
 +	}
 +
 +	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 +		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
 +		       " base address, aborting\n");
 +		rc = -ENODEV;
 +		goto err_out_disable;
 +	}
 +
 +	if (atomic_read(&pdev->enable_cnt) == 1) {
 +		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 +		if (rc) {
 +			dev_err(&bp->pdev->dev,
 +				"Cannot obtain PCI resources, aborting\n");
 +			goto err_out_disable;
 +		}
 +
 +		pci_set_master(pdev);
 +		pci_save_state(pdev);
 +	}
 +
 +	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 +	if (bp->pm_cap == 0) {
 +		dev_err(&bp->pdev->dev,
 +			"Cannot find power management capability, aborting\n");
 +		rc = -EIO;
 +		goto err_out_release;
 +	}
 +
 +	if (!pci_is_pcie(pdev)) {
 +		dev_err(&bp->pdev->dev,	"Not PCI Express, aborting\n");
 +		rc = -EIO;
 +		goto err_out_release;
 +	}
 +
 +	rc = bnx2x_set_coherency_mask(bp);
 +	if (rc)
 +		goto err_out_release;
 +
 +	dev->mem_start = pci_resource_start(pdev, 0);
 +	dev->base_addr = dev->mem_start;
 +	dev->mem_end = pci_resource_end(pdev, 0);
 +
 +	dev->irq = pdev->irq;
 +
 +	bp->regview = pci_ioremap_bar(pdev, 0);
 +	if (!bp->regview) {
 +		dev_err(&bp->pdev->dev,
 +			"Cannot map register space, aborting\n");
 +		rc = -ENOMEM;
 +		goto err_out_release;
 +	}
 +
 +	bnx2x_set_power_state(bp, PCI_D0);
 +
 +	/* clean indirect addresses */
 +	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +			       PCICFG_VENDOR_ID_OFFSET);
 +	/*
 +	 * Clean the following indirect addresses for all functions since it
 +	 * is not used by the driver.
 +	 */
 +	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 +	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 +	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 +	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 +
 +	if (CHIP_IS_E1x(bp)) {
 +		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
 +		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
 +		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
 +		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
 +	}
 +
 +	/*
 +	 * Enable internal target-read (in case we are probed after PF FLR).
 +	 * Must be done prior to any BAR read access. Only for 57712 and up.
 +	 */
 +	if (board_type != BCM57710 &&
 +	    board_type != BCM57711 &&
 +	    board_type != BCM57711E)
 +		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 +
 +	/* Reset the load counter */
 +	bnx2x_clear_load_cnt(bp);
 +
 +	dev->watchdog_timeo = TX_TIMEOUT;
 +
 +	dev->netdev_ops = &bnx2x_netdev_ops;
 +	bnx2x_set_ethtool_ops(dev);
 +
 +	dev->priv_flags |= IFF_UNICAST_FLT;
 +
 +	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
 +		NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
 +
 +	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 +
 +	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
 +	if (bp->flags & USING_DAC_FLAG)
 +		dev->features |= NETIF_F_HIGHDMA;
 +
 +	/* Add Loopback capability to the device */
 +	dev->hw_features |= NETIF_F_LOOPBACK;
 +
 +#ifdef BCM_DCBNL
 +	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 +#endif
 +
 +	/* get_port_hwinfo() will set prtad and mmds properly */
 +	bp->mdio.prtad = MDIO_PRTAD_NONE;
 +	bp->mdio.mmds = 0;
 +	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
 +	bp->mdio.dev = dev;
 +	bp->mdio.mdio_read = bnx2x_mdio_read;
 +	bp->mdio.mdio_write = bnx2x_mdio_write;
 +
 +	return 0;
 +
 +err_out_release:
 +	if (atomic_read(&pdev->enable_cnt) == 1)
 +		pci_release_regions(pdev);
 +
 +err_out_disable:
 +	pci_disable_device(pdev);
 +	pci_set_drvdata(pdev, NULL);
 +
 +err_out:
 +	return rc;
 +}
 +
 +static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
 +						 int *width, int *speed)
 +{
 +	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
 +
 +	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
 +
 +	/* returned value: 1 = 2.5GHz, 2 = 5GHz */
 +	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
 +}
 +
 +static int bnx2x_check_firmware(struct bnx2x *bp)
 +{
 +	const struct firmware *firmware = bp->firmware;
 +	struct bnx2x_fw_file_hdr *fw_hdr;
 +	struct bnx2x_fw_file_section *sections;
 +	u32 offset, len, num_ops;
 +	u16 *ops_offsets;
 +	int i;
 +	const u8 *fw_ver;
 +
 +	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
 +		return -EINVAL;
 +
 +	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
 +	sections = (struct bnx2x_fw_file_section *)fw_hdr;
 +
 +	/* Make sure none of the offsets and sizes make us read beyond
 +	 * the end of the firmware data */
 +	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
 +		offset = be32_to_cpu(sections[i].offset);
 +		len = be32_to_cpu(sections[i].len);
 +		if (offset + len > firmware->size) {
 +			dev_err(&bp->pdev->dev,
 +				"Section %d length is out of bounds\n", i);
 +			return -EINVAL;
 +		}
 +	}
 +
 +	/* Likewise for the init_ops offsets */
 +	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
 +	ops_offsets = (u16 *)(firmware->data + offset);
 +	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
 +
 +	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
 +		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
 +			dev_err(&bp->pdev->dev,
 +				"Section offset %d is out of bounds\n", i);
 +			return -EINVAL;
 +		}
 +	}
 +
 +	/* Check FW version */
 +	offset = be32_to_cpu(fw_hdr->fw_version.offset);
 +	fw_ver = firmware->data + offset;
 +	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
 +	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
 +	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
 +	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
 +		dev_err(&bp->pdev->dev,
 +			"Bad FW version: %d.%d.%d.%d. Should be %d.%d.%d.%d\n",
 +		       fw_ver[0], fw_ver[1], fw_ver[2],
 +		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
 +		       BCM_5710_FW_MINOR_VERSION,
 +		       BCM_5710_FW_REVISION_VERSION,
 +		       BCM_5710_FW_ENGINEERING_VERSION);
 +		return -EINVAL;
 +	}
 +
 +	return 0;
 +}
 +
 +static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 +{
 +	const __be32 *source = (const __be32 *)_source;
 +	u32 *target = (u32 *)_target;
 +	u32 i;
 +
 +	for (i = 0; i < n/4; i++)
 +		target[i] = be32_to_cpu(source[i]);
 +}
 +
 +/*
 + * Ops array is stored in the following format:
 + * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 + */
 +static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 +{
 +	const __be32 *source = (const __be32 *)_source;
 +	struct raw_op *target = (struct raw_op *)_target;
 +	u32 i, j, tmp;
 +
 +	for (i = 0, j = 0; i < n/8; i++, j += 2) {
 +		tmp = be32_to_cpu(source[j]);
 +		target[i].op = (tmp >> 24) & 0xff;
 +		target[i].offset = tmp & 0xffffff;
 +		target[i].raw_data = be32_to_cpu(source[j + 1]);
 +	}
 +}
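 +
 +/*
 + * For example, the big-endian word pair {0x12abcdef, 0x00000004}
 + * decodes under the scheme above to op = 0x12, offset = 0xabcdef and
 + * raw_data = 0x4.
 + */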
 +
 +/*
 + * IRO array is stored in the following format:
 + * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 + */
 +static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 +{
 +	const __be32 *source = (const __be32 *)_source;
 +	struct iro *target = (struct iro *)_target;
 +	u32 i, j, tmp;
 +
 +	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
 +		target[i].base = be32_to_cpu(source[j]);
 +		j++;
 +		tmp = be32_to_cpu(source[j]);
 +		target[i].m1 = (tmp >> 16) & 0xffff;
 +		target[i].m2 = tmp & 0xffff;
 +		j++;
 +		tmp = be32_to_cpu(source[j]);
 +		target[i].m3 = (tmp >> 16) & 0xffff;
 +		target[i].size = tmp & 0xffff;
 +		j++;
 +	}
 +}
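 +
 +/*
 + * Each on-disk IRO record is three big-endian 32-bit words (12 bytes);
 + * the n/sizeof(struct iro) loop bound above therefore assumes that
 + * struct iro packs to those same 12 bytes.
 + */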
 +
 +static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 +{
 +	const __be16 *source = (const __be16 *)_source;
 +	u16 *target = (u16 *)_target;
 +	u32 i;
 +
 +	for (i = 0; i < n/2; i++)
 +		target[i] = be16_to_cpu(source[i]);
 +}
 +
 +#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
 +do {									\
 +	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
 +	bp->arr = kmalloc(len, GFP_KERNEL);				\
 +	if (!bp->arr) {							\
 +		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
 +		goto lbl;						\
 +	}								\
 +	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
 +	     (u8 *)bp->arr, len);					\
 +} while (0)
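 +
 +/*
 + * Usage sketch: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 + * be32_to_cpu_n) allocates bp->init_data with the length taken from
 + * the firmware header and fills it by byte-swapping the matching
 + * section of bp->firmware->data, jumping to request_firmware_exit on
 + * allocation failure.
 + */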
 +
 +int bnx2x_init_firmware(struct bnx2x *bp)
 +{
 +	const char *fw_file_name;
 +	struct bnx2x_fw_file_hdr *fw_hdr;
 +	int rc;
 +
 +	if (CHIP_IS_E1(bp))
 +		fw_file_name = FW_FILE_NAME_E1;
 +	else if (CHIP_IS_E1H(bp))
 +		fw_file_name = FW_FILE_NAME_E1H;
 +	else if (!CHIP_IS_E1x(bp))
 +		fw_file_name = FW_FILE_NAME_E2;
 +	else {
 +		BNX2X_ERR("Unsupported chip revision\n");
 +		return -EINVAL;
 +	}
 +
 +	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
 +
 +	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
 +	if (rc) {
 +		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
 +		goto request_firmware_exit;
 +	}
 +
 +	rc = bnx2x_check_firmware(bp);
 +	if (rc) {
 +		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
 +		goto request_firmware_exit;
 +	}
 +
 +	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
 +
 +	/* Initialize the pointers to the init arrays */
 +	/* Blob */
 +	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 +
 +	/* Opcodes */
 +	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
 +
 +	/* Offsets */
 +	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
 +			    be16_to_cpu_n);
 +
 +	/* STORMs firmware */
 +	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
 +	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
 +	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
 +	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->usem_pram_data.offset);
 +	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
 +	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
 +	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
 +	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +			be32_to_cpu(fw_hdr->csem_pram_data.offset);
 +	/* IRO */
 +	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
 +
 +	return 0;
 +
 +iro_alloc_err:
 +	kfree(bp->init_ops_offsets);
 +init_offsets_alloc_err:
 +	kfree(bp->init_ops);
 +init_ops_alloc_err:
 +	kfree(bp->init_data);
 +request_firmware_exit:
 +	release_firmware(bp->firmware);
 +
 +	return rc;
 +}
 +
 +static void bnx2x_release_firmware(struct bnx2x *bp)
 +{
 +	kfree(bp->init_ops_offsets);
 +	kfree(bp->init_ops);
 +	kfree(bp->init_data);
 +	release_firmware(bp->firmware);
 +}
 +
 +
 +static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
 +	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
 +	.init_hw_cmn      = bnx2x_init_hw_common,
 +	.init_hw_port     = bnx2x_init_hw_port,
 +	.init_hw_func     = bnx2x_init_hw_func,
 +
 +	.reset_hw_cmn     = bnx2x_reset_common,
 +	.reset_hw_port    = bnx2x_reset_port,
 +	.reset_hw_func    = bnx2x_reset_func,
 +
 +	.gunzip_init      = bnx2x_gunzip_init,
 +	.gunzip_end       = bnx2x_gunzip_end,
 +
 +	.init_fw          = bnx2x_init_firmware,
 +	.release_fw       = bnx2x_release_firmware,
 +};
 +
 +void bnx2x__init_func_obj(struct bnx2x *bp)
 +{
 +	/* Prepare DMAE related driver resources */
 +	bnx2x_setup_dmae(bp);
 +
 +	bnx2x_init_func_obj(bp, &bp->func_obj,
 +			    bnx2x_sp(bp, func_rdata),
 +			    bnx2x_sp_mapping(bp, func_rdata),
 +			    &bnx2x_func_sp_drv);
 +}
 +
 +/* must be called after sriov-enable */
 +static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 +{
 +	int cid_count = BNX2X_L2_CID_COUNT(bp);
 +
 +#ifdef BCM_CNIC
 +	cid_count += CNIC_CID_MAX;
 +#endif
 +	return roundup(cid_count, QM_CID_ROUND);
 +}
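 +
 +/*
 + * For example, assuming QM_CID_ROUND is 1024, an L2 CID count of 1000
 + * (plus CNIC_CID_MAX when BCM_CNIC is defined) is rounded up to the
 + * next multiple of 1024.
 + */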
 +
 +/**
 + * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 + *
 + * @pdev:	pci device
 + */
 +static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 +{
 +	int pos;
 +	u16 control;
 +
 +	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
 +
 +	/*
 +	 * If MSI-X is not supported, return the number of SBs needed to
 +	 * support one fast path queue: one FP queue + one SB for CNIC.
 +	 */
 +	if (!pos)
 +		return 1 + CNIC_PRESENT;
 +
 +	/*
 +	 * The value in the PCI configuration space is the index of the last
 +	 * entry, namely one less than the actual size of the table, which is
 +	 * exactly what we want to return from this function: number of all SBs
 +	 * without the default SB.
 +	 */
 +	pci_read_config_word(pdev, pos  + PCI_MSI_FLAGS, &control);
 +	return control & PCI_MSIX_FLAGS_QSIZE;
 +}
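 +
 +/*
 + * For example, an MSI-X capability whose table-size field reads 8
 + * describes a 9-entry table, so this function returns 8: all SBs
 + * minus the default one.
 + */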
 +
 +static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 +				    const struct pci_device_id *ent)
 +{
 +	struct net_device *dev = NULL;
 +	struct bnx2x *bp;
 +	int pcie_width, pcie_speed;
 +	int rc, max_non_def_sbs;
 +	int rx_count, tx_count, rss_count;
 +	/*
 +	 * An estimated maximum supported CoS number according to the chip
 +	 * version.
 +	 * We will try to roughly estimate the maximum number of CoSes this chip
 +	 * may support in order to minimize the memory allocated for Tx
 +	 * netdev_queues. This number will be accurately calculated during the
 +	 * initialization of bp->max_cos based on the chip version AND chip
 +	 * revision in bnx2x_init_bp().
 +	 */
 +	u8 max_cos_est = 0;
 +
 +	switch (ent->driver_data) {
 +	case BCM57710:
 +	case BCM57711:
 +	case BCM57711E:
 +		max_cos_est = BNX2X_MULTI_TX_COS_E1X;
 +		break;
 +
 +	case BCM57712:
 +	case BCM57712_MF:
 +		max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
 +		break;
 +
 +	case BCM57800:
 +	case BCM57800_MF:
 +	case BCM57810:
 +	case BCM57810_MF:
 +	case BCM57840:
 +	case BCM57840_MF:
 +		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
 +		break;
 +
 +	default:
 +		pr_err("Unknown board_type (%ld), aborting\n",
 +			   ent->driver_data);
 +		return -ENODEV;
 +	}
 +
 +	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
 +
 +	/* !!! FIXME !!!
 +	 * Do not allow the maximum SB count to grow above 16
 +	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
 +	 * We will use the FP_SB_MAX_E1x macro for this.
 +	 */
 +	max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
 +
 +	WARN_ON(!max_non_def_sbs);
 +
 +	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
 +	rss_count = max_non_def_sbs - CNIC_PRESENT;
 +
 +	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
 +	rx_count = rss_count + FCOE_PRESENT;
 +
 +	/*
 +	 * Maximum number of netdev Tx queues:
 +	 *      Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
 +	 */
 +	tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
 +
 +	/* dev zeroed in init_etherdev */
 +	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
 +	if (!dev) {
 +		dev_err(&pdev->dev, "Cannot allocate net device\n");
 +		return -ENOMEM;
 +	}
 +
 +	bp = netdev_priv(dev);
 +
 +	DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
 +			  tx_count, rx_count);
 +
 +	bp->igu_sb_cnt = max_non_def_sbs;
 +	bp->msg_enable = debug;
 +	pci_set_drvdata(pdev, dev);
 +
 +	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
 +	if (rc < 0) {
 +		free_netdev(dev);
 +		return rc;
 +	}
 +
 +	DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
 +
 +	rc = bnx2x_init_bp(bp);
 +	if (rc)
 +		goto init_one_exit;
 +
 +	/*
 +	 * Map doorbells here as we need the real value of bp->max_cos which
 +	 * is initialized in bnx2x_init_bp().
 +	 */
 +	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 +					min_t(u64, BNX2X_DB_SIZE(bp),
 +					      pci_resource_len(pdev, 2)));
 +	if (!bp->doorbells) {
 +		dev_err(&bp->pdev->dev,
 +			"Cannot map doorbell space, aborting\n");
 +		rc = -ENOMEM;
 +		goto init_one_exit;
 +	}
 +
 +	/* calc qm_cid_count */
 +	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 +
 +#ifdef BCM_CNIC
 +	/* disable FCOE L2 queue for E1x and E3 */
 +	if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
 +		bp->flags |= NO_FCOE_FLAG;
 +
 +#endif
 +
 +	/* Configure interrupt mode: try to enable MSI-X/MSI if
 +	 * needed, set bp->num_queues appropriately.
 +	 */
 +	bnx2x_set_int_mode(bp);
 +
 +	/* Add all NAPI objects */
 +	bnx2x_add_all_napi(bp);
 +
 +	rc = register_netdev(dev);
 +	if (rc) {
 +		dev_err(&pdev->dev, "Cannot register net device\n");
 +		goto init_one_exit;
 +	}
 +
 +#ifdef BCM_CNIC
 +	if (!NO_FCOE(bp)) {
 +		/* Add storage MAC address */
 +		rtnl_lock();
 +		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +		rtnl_unlock();
 +	}
 +#endif
 +
 +	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 +
 +	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
 +		    board_info[ent->driver_data].name,
 +		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 +		    pcie_width,
 +		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
 +		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
 +		    "5GHz (Gen2)" : "2.5GHz",
 +		    dev->base_addr, bp->pdev->irq, dev->dev_addr);
 +
 +	return 0;
 +
 +init_one_exit:
 +	if (bp->regview)
 +		iounmap(bp->regview);
 +
 +	if (bp->doorbells)
 +		iounmap(bp->doorbells);
 +
 +	free_netdev(dev);
 +
 +	if (atomic_read(&pdev->enable_cnt) == 1)
 +		pci_release_regions(pdev);
 +
 +	pci_disable_device(pdev);
 +	pci_set_drvdata(pdev, NULL);
 +
 +	return rc;
 +}
 +
 +static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 +{
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct bnx2x *bp;
 +
 +	if (!dev) {
 +		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +		return;
 +	}
 +	bp = netdev_priv(dev);
 +
 +#ifdef BCM_CNIC
 +	/* Delete storage MAC address */
 +	if (!NO_FCOE(bp)) {
 +		rtnl_lock();
 +		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +		rtnl_unlock();
 +	}
 +#endif
 +
 +#ifdef BCM_DCBNL
 +	/* Delete app tlvs from dcbnl */
 +	bnx2x_dcbnl_update_applist(bp, true);
 +#endif
 +
 +	unregister_netdev(dev);
 +
 +	/* Delete all NAPI objects */
 +	bnx2x_del_all_napi(bp);
 +
 +	/* Power on: we can't let the PCI layer write to us while in D3 */
 +	bnx2x_set_power_state(bp, PCI_D0);
 +
 +	/* Disable MSI/MSI-X */
 +	bnx2x_disable_msi(bp);
 +
 +	/* Power off */
 +	bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +	/* Make sure RESET task is not scheduled before continuing */
 +	cancel_delayed_work_sync(&bp->sp_rtnl_task);
 +
 +	if (bp->regview)
 +		iounmap(bp->regview);
 +
 +	if (bp->doorbells)
 +		iounmap(bp->doorbells);
 +
 +	bnx2x_free_mem_bp(bp);
 +
 +	free_netdev(dev);
 +
 +	if (atomic_read(&pdev->enable_cnt) == 1)
 +		pci_release_regions(pdev);
 +
 +	pci_disable_device(pdev);
 +	pci_set_drvdata(pdev, NULL);
 +}
 +
 +static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 +{
 +	int i;
 +
 +	bp->state = BNX2X_STATE_ERROR;
 +
 +	bp->rx_mode = BNX2X_RX_MODE_NONE;
 +
 +#ifdef BCM_CNIC
 +	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 +#endif
 +	/* Stop Tx */
 +	bnx2x_tx_disable(bp);
 +
 +	bnx2x_netif_stop(bp, 0);
 +
 +	del_timer_sync(&bp->timer);
 +
 +	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +	/* Release IRQs */
 +	bnx2x_free_irq(bp);
 +
 +	/* Free SKBs, SGEs, TPA pool and driver internals */
 +	bnx2x_free_skbs(bp);
 +
 +	for_each_rx_queue(bp, i)
 +		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +	bnx2x_free_mem(bp);
 +
 +	bp->state = BNX2X_STATE_CLOSED;
 +
 +	netif_carrier_off(bp->dev);
 +
 +	return 0;
 +}
 +
 +static void bnx2x_eeh_recover(struct bnx2x *bp)
 +{
 +	u32 val;
 +
 +	mutex_init(&bp->port.phy_mutex);
 +
 +	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +	bp->link_params.shmem_base = bp->common.shmem_base;
 +	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
 +
 +	if (!bp->common.shmem_base ||
 +	    (bp->common.shmem_base < 0xA0000) ||
 +	    (bp->common.shmem_base >= 0xC0000)) {
 +		BNX2X_DEV_INFO("MCP not active\n");
 +		bp->flags |= NO_MCP_FLAG;
 +		return;
 +	}
 +
 +	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 +	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
 +		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
 +		BNX2X_ERR("BAD MCP validity signature\n");
 +
 +	if (!BP_NOMCP(bp)) {
 +		bp->fw_seq =
 +		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 +		    DRV_MSG_SEQ_NUMBER_MASK);
 +		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
 +	}
 +}
 +
 +/**
 + * bnx2x_io_error_detected - called when PCI error is detected
 + * @pdev: Pointer to PCI device
 + * @state: The current pci connection state
 + *
 + * This function is called after a PCI bus error affecting
 + * this device has been detected.
 + */
 +static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
 +						pci_channel_state_t state)
 +{
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	rtnl_lock();
 +
 +	netif_device_detach(dev);
 +
 +	if (state == pci_channel_io_perm_failure) {
 +		rtnl_unlock();
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	}
 +
 +	if (netif_running(dev))
 +		bnx2x_eeh_nic_unload(bp);
 +
 +	pci_disable_device(pdev);
 +
 +	rtnl_unlock();
 +
 +	/* Request a slot reset */
 +	return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +/**
 + * bnx2x_io_slot_reset - called after the PCI bus has been reset
 + * @pdev: Pointer to PCI device
 + *
 + * Restart the card from scratch, as if from a cold boot.
 + */
 +static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 +{
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	rtnl_lock();
 +
 +	if (pci_enable_device(pdev)) {
 +		dev_err(&pdev->dev,
 +			"Cannot re-enable PCI device after reset\n");
 +		rtnl_unlock();
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	}
 +
 +	pci_set_master(pdev);
 +	pci_restore_state(pdev);
 +
 +	if (netif_running(dev))
 +		bnx2x_set_power_state(bp, PCI_D0);
 +
 +	rtnl_unlock();
 +
 +	return PCI_ERS_RESULT_RECOVERED;
 +}
 +
 +/**
 + * bnx2x_io_resume - called when traffic can start flowing again
 + * @pdev: Pointer to PCI device
 + *
 + * This callback is called when the error recovery driver tells us that
 + * it's OK to resume normal operation.
 + */
 +static void bnx2x_io_resume(struct pci_dev *pdev)
 +{
 +	struct net_device *dev = pci_get_drvdata(pdev);
 +	struct bnx2x *bp = netdev_priv(dev);
 +
 +	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +		netdev_err(bp->dev, "Handling parity error recovery. "
 +				    "Try again later\n");
 +		return;
 +	}
 +
 +	rtnl_lock();
 +
 +	bnx2x_eeh_recover(bp);
 +
 +	if (netif_running(dev))
 +		bnx2x_nic_load(bp, LOAD_NORMAL);
 +
 +	netif_device_attach(dev);
 +
 +	rtnl_unlock();
 +}
 +
 +static struct pci_error_handlers bnx2x_err_handler = {
 +	.error_detected = bnx2x_io_error_detected,
 +	.slot_reset     = bnx2x_io_slot_reset,
 +	.resume         = bnx2x_io_resume,
 +};
 +
 +static struct pci_driver bnx2x_pci_driver = {
 +	.name        = DRV_MODULE_NAME,
 +	.id_table    = bnx2x_pci_tbl,
 +	.probe       = bnx2x_init_one,
 +	.remove      = __devexit_p(bnx2x_remove_one),
 +	.suspend     = bnx2x_suspend,
 +	.resume      = bnx2x_resume,
 +	.err_handler = &bnx2x_err_handler,
 +};
 +
 +static int __init bnx2x_init(void)
 +{
 +	int ret;
 +
 +	pr_info("%s", version);
 +
 +	bnx2x_wq = create_singlethread_workqueue("bnx2x");
 +	if (bnx2x_wq == NULL) {
 +		pr_err("Cannot create workqueue\n");
 +		return -ENOMEM;
 +	}
 +
 +	ret = pci_register_driver(&bnx2x_pci_driver);
 +	if (ret) {
 +		pr_err("Cannot register driver\n");
 +		destroy_workqueue(bnx2x_wq);
 +	}
 +	return ret;
 +}
 +
 +static void __exit bnx2x_cleanup(void)
 +{
 +	pci_unregister_driver(&bnx2x_pci_driver);
 +
 +	destroy_workqueue(bnx2x_wq);
 +}
 +
 +void bnx2x_notify_link_changed(struct bnx2x *bp)
 +{
 +	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
 +}
 +
 +module_init(bnx2x_init);
 +module_exit(bnx2x_cleanup);
 +
 +#ifdef BCM_CNIC
 +/**
 + * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 + *
 + * @bp:		driver handle
 + *
 + * This function will wait until the ramrod completion returns.
 + * Return 0 on success, -ENODEV if the ramrod doesn't return.
 + */
 +static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 +{
 +	unsigned long ramrod_flags = 0;
 +
 +	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
 +				 &bp->iscsi_l2_mac_obj, true,
 +				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
 +}
 +
 +/* count denotes the number of new completions we have seen */
 +static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
 +{
 +	struct eth_spe *spe;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	if (unlikely(bp->panic))
 +		return;
 +#endif
 +
 +	spin_lock_bh(&bp->spq_lock);
 +	BUG_ON(bp->cnic_spq_pending < count);
 +	bp->cnic_spq_pending -= count;
 +
 +	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
 +		u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
 +				& SPE_HDR_CONN_TYPE) >>
 +				SPE_HDR_CONN_TYPE_SHIFT;
 +		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
 +				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;
 +
 +		/* Set validation for iSCSI L2 client before sending SETUP
 +		 *  ramrod
 +		 */
 +		if (type == ETH_CONNECTION_TYPE) {
 +			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
 +				bnx2x_set_ctx_validation(bp, &bp->context.
 +					vcxt[BNX2X_ISCSI_ETH_CID].eth,
 +					BNX2X_ISCSI_ETH_CID);
 +		}
 +
 +		/*
 +		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
 +		 * in the air. We also check that the number of outstanding
 +		 * COMMON ramrods is not more than the EQ and SPQ can
 +		 * accommodate.
 +		 */
 +		if (type == ETH_CONNECTION_TYPE) {
 +			if (!atomic_read(&bp->cq_spq_left))
 +				break;
 +			else
 +				atomic_dec(&bp->cq_spq_left);
 +		} else if (type == NONE_CONNECTION_TYPE) {
 +			if (!atomic_read(&bp->eq_spq_left))
 +				break;
 +			else
 +				atomic_dec(&bp->eq_spq_left);
 +		} else if ((type == ISCSI_CONNECTION_TYPE) ||
 +			   (type == FCOE_CONNECTION_TYPE)) {
 +			if (bp->cnic_spq_pending >=
 +			    bp->cnic_eth_dev.max_kwqe_pending)
 +				break;
 +			else
 +				bp->cnic_spq_pending++;
 +		} else {
 +			BNX2X_ERR("Unknown SPE type: %d\n", type);
 +			bnx2x_panic();
 +			break;
 +		}
 +
 +		spe = bnx2x_sp_get_next(bp);
 +		*spe = *bp->cnic_kwq_cons;
 +
 +		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
 +		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
 +
 +		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
 +			bp->cnic_kwq_cons = bp->cnic_kwq;
 +		else
 +			bp->cnic_kwq_cons++;
 +	}
 +	bnx2x_sp_prod_update(bp);
 +	spin_unlock_bh(&bp->spq_lock);
 +}
 +
 +static int bnx2x_cnic_sp_queue(struct net_device *dev,
 +			       struct kwqe_16 *kwqes[], u32 count)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	int i;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +	if (unlikely(bp->panic))
 +		return -EIO;
 +#endif
 +
 +	spin_lock_bh(&bp->spq_lock);
 +
 +	for (i = 0; i < count; i++) {
 +		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
 +
 +		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
 +			break;
 +
 +		*bp->cnic_kwq_prod = *spe;
 +
 +		bp->cnic_kwq_pending++;
 +
 +		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
 +		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
 +		   spe->data.update_data_addr.hi,
 +		   spe->data.update_data_addr.lo,
 +		   bp->cnic_kwq_pending);
 +
 +		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
 +			bp->cnic_kwq_prod = bp->cnic_kwq;
 +		else
 +			bp->cnic_kwq_prod++;
 +	}
 +
 +	spin_unlock_bh(&bp->spq_lock);
 +
 +	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
 +		bnx2x_cnic_sp_post(bp, 0);
 +
 +	return i;
 +}
 +
 +static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 +{
 +	struct cnic_ops *c_ops;
 +	int rc = 0;
 +
 +	mutex_lock(&bp->cnic_mutex);
 +	c_ops = rcu_dereference_protected(bp->cnic_ops,
 +					  lockdep_is_held(&bp->cnic_mutex));
 +	if (c_ops)
 +		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 +	mutex_unlock(&bp->cnic_mutex);
 +
 +	return rc;
 +}
 +
 +static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 +{
 +	struct cnic_ops *c_ops;
 +	int rc = 0;
 +
 +	rcu_read_lock();
 +	c_ops = rcu_dereference(bp->cnic_ops);
 +	if (c_ops)
 +		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 +	rcu_read_unlock();
 +
 +	return rc;
 +}
 +
 +/*
 + * for commands that have no data
 + */
 +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
 +{
 +	struct cnic_ctl_info ctl = {0};
 +
 +	ctl.cmd = cmd;
 +
 +	return bnx2x_cnic_ctl_send(bp, &ctl);
 +}
 +
 +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
 +{
 +	struct cnic_ctl_info ctl = {0};
 +
 +	/* first we tell CNIC and only then we count this as a completion */
 +	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
 +	ctl.data.comp.cid = cid;
 +	ctl.data.comp.error = err;
 +
 +	bnx2x_cnic_ctl_send_bh(bp, &ctl);
 +	bnx2x_cnic_sp_post(bp, 0);
 +}
 +
 +
 +/* Called with netif_addr_lock_bh() taken.
 + * Sets an rx_mode config for an iSCSI ETH client.
 + * Doesn't block.
 + * Completion should be checked outside.
 + */
 +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
 +{
 +	unsigned long accept_flags = 0, ramrod_flags = 0;
 +	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
 +	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
 +
 +	if (start) {
 +		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
 +		 * because it's the only way for the UIO Queue to accept
 +		 * them: in non-promiscuous mode only one Queue per function
 +		 * (the leading one in our case) will receive multicast
 +		 * packets.
 +		 */
 +		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
 +		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
 +		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
 +
 +		/* Clear STOP_PENDING bit if START is requested */
 +		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
 +
 +		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
 +	} else
 +		/* Clear START_PENDING bit if STOP is requested */
 +		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
 +
 +	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 +		set_bit(sched_state, &bp->sp_state);
 +	else {
 +		__set_bit(RAMROD_RX, &ramrod_flags);
 +		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
 +				    ramrod_flags);
 +	}
 +}
 +
 +
 +static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	int rc = 0;
 +
 +	switch (ctl->cmd) {
 +	case DRV_CTL_CTXTBL_WR_CMD: {
 +		u32 index = ctl->data.io.offset;
 +		dma_addr_t addr = ctl->data.io.dma_addr;
 +
 +		bnx2x_ilt_wr(bp, index, addr);
 +		break;
 +	}
 +
 +	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
 +		int count = ctl->data.credit.credit_count;
 +
 +		bnx2x_cnic_sp_post(bp, count);
 +		break;
 +	}
 +
 +	/* rtnl_lock is held.  */
 +	case DRV_CTL_START_L2_CMD: {
 +		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +		unsigned long sp_bits = 0;
 +
 +		/* Configure the iSCSI classification object */
 +		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
 +				   cp->iscsi_l2_client_id,
 +				   cp->iscsi_l2_cid, BP_FUNC(bp),
 +				   bnx2x_sp(bp, mac_rdata),
 +				   bnx2x_sp_mapping(bp, mac_rdata),
 +				   BNX2X_FILTER_MAC_PENDING,
 +				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
 +				   &bp->macs_pool);
 +
 +		/* Set iSCSI MAC address */
 +		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
 +		if (rc)
 +			break;
 +
 +		mmiowb();
 +		barrier();
 +
 +		/* Start accepting on iSCSI L2 ring */
 +
 +		netif_addr_lock_bh(dev);
 +		bnx2x_set_iscsi_eth_rx_mode(bp, true);
 +		netif_addr_unlock_bh(dev);
 +
 +		/* bits to wait on */
 +		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
 +		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
 +
 +		if (!bnx2x_wait_sp_comp(bp, sp_bits))
 +			BNX2X_ERR("rx_mode completion timed out!\n");
 +
 +		break;
 +	}
 +
 +	/* rtnl_lock is held.  */
 +	case DRV_CTL_STOP_L2_CMD: {
 +		unsigned long sp_bits = 0;
 +
 +		/* Stop accepting on iSCSI L2 ring */
 +		netif_addr_lock_bh(dev);
 +		bnx2x_set_iscsi_eth_rx_mode(bp, false);
 +		netif_addr_unlock_bh(dev);
 +
 +		/* bits to wait on */
 +		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
 +		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
 +
 +		if (!bnx2x_wait_sp_comp(bp, sp_bits))
 +			BNX2X_ERR("rx_mode completion timed out!\n");
 +
 +		mmiowb();
 +		barrier();
 +
 +		/* Unset iSCSI L2 MAC */
 +		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
 +					BNX2X_ISCSI_ETH_MAC, true);
 +		break;
 +	}
 +	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
 +		int count = ctl->data.credit.credit_count;
 +
 +		smp_mb__before_atomic_inc();
 +		atomic_add(count, &bp->cq_spq_left);
 +		smp_mb__after_atomic_inc();
 +		break;
 +	}
 +
 +	default:
 +		BNX2X_ERR("unknown command %x\n", ctl->cmd);
 +		rc = -EINVAL;
 +	}
 +
 +	return rc;
 +}
 +
 +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
 +{
 +	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +	if (bp->flags & USING_MSIX_FLAG) {
 +		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 +		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 +		cp->irq_arr[0].vector = bp->msix_table[1].vector;
 +	} else {
 +		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 +		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 +	}
 +	if (!CHIP_IS_E1x(bp))
 +		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
 +	else
 +		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
 +
 +	cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
 +	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
 +	cp->irq_arr[1].status_blk = bp->def_status_blk;
 +	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
 +	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
 +
 +	cp->num_irq = 2;
 +}
 +
 +static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 +			       void *data)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +	if (ops == NULL)
 +		return -EINVAL;
 +
 +	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
 +	if (!bp->cnic_kwq)
 +		return -ENOMEM;
 +
 +	bp->cnic_kwq_cons = bp->cnic_kwq;
 +	bp->cnic_kwq_prod = bp->cnic_kwq;
 +	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
 +
 +	bp->cnic_spq_pending = 0;
 +	bp->cnic_kwq_pending = 0;
 +
 +	bp->cnic_data = data;
 +
 +	cp->num_irq = 0;
 +	cp->drv_state |= CNIC_DRV_STATE_REGD;
 +	cp->iro_arr = bp->iro_arr;
 +
 +	bnx2x_setup_cnic_irq_info(bp);
 +
 +	rcu_assign_pointer(bp->cnic_ops, ops);
 +
 +	return 0;
 +}
 +
 +static int bnx2x_unregister_cnic(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +	mutex_lock(&bp->cnic_mutex);
 +	cp->drv_state = 0;
 +	rcu_assign_pointer(bp->cnic_ops, NULL);
 +	mutex_unlock(&bp->cnic_mutex);
 +	synchronize_rcu();
 +	kfree(bp->cnic_kwq);
 +	bp->cnic_kwq = NULL;
 +
 +	return 0;
 +}
 +
 +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 +{
 +	struct bnx2x *bp = netdev_priv(dev);
 +	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +	/* If both iSCSI and FCoE are disabled - return NULL in
 +	 * order to indicate to CNIC that it should not try to work
 +	 * with this device.
 +	 */
 +	if (NO_ISCSI(bp) && NO_FCOE(bp))
 +		return NULL;
 +
 +	cp->drv_owner = THIS_MODULE;
 +	cp->chip_id = CHIP_ID(bp);
 +	cp->pdev = bp->pdev;
 +	cp->io_base = bp->regview;
 +	cp->io_base2 = bp->doorbells;
 +	cp->max_kwqe_pending = 8;
 +	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
 +	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
 +			     bnx2x_cid_ilt_lines(bp);
 +	cp->ctx_tbl_len = CNIC_ILT_LINES;
 +	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
 +	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
 +	cp->drv_ctl = bnx2x_drv_ctl;
 +	cp->drv_register_cnic = bnx2x_register_cnic;
 +	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 +	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
 +	cp->iscsi_l2_client_id =
 +		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
 +	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 +
 +	if (NO_ISCSI_OOO(bp))
 +		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
 +
 +	if (NO_ISCSI(bp))
 +		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
 +
 +	if (NO_FCOE(bp))
 +		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
 +
 +	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
 +			 "starting cid %d\n",
 +	   cp->ctx_blk_size,
 +	   cp->ctx_tbl_offset,
 +	   cp->ctx_tbl_len,
 +	   cp->starting_cid);
 +	return cp;
 +}
 +EXPORT_SYMBOL(bnx2x_cnic_probe);
 +
 +#endif /* BCM_CNIC */
 +
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 750e844,fc7bd0f..fc7bd0f
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@@ -1384,6 -1384,18 +1384,18 @@@
     Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
  #define MISC_REG_AEU_ENABLE4_PXP_0				 0xa108
  #define MISC_REG_AEU_ENABLE4_PXP_1				 0xa1a8
+ /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+  * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+  * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+  * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+  * parity; [31-10] Reserved; */
+ #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0			 0xa688
+ /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+  * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+  * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+  * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+  * parity; [31-10] Reserved; */
+ #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0			 0xa6b0
  /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
     128 bit vector */
  #define MISC_REG_AEU_GENERAL_ATTN_0				 0xa000
diff --combined drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 805076c,da5a5d9..da5a5d9
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@@ -1146,12 -1146,14 +1146,14 @@@ static void cxgb_redirect(struct dst_en
  		if (te && te->ctx && te->client && te->client->redirect) {
  			update_tcb = te->client->redirect(te->ctx, old, new, e);
  			if (update_tcb) {
+ 				rcu_read_lock();
  				l2t_hold(L2DATA(tdev), e);
+ 				rcu_read_unlock();
  				set_l2t_ix(tdev, tid, e);
  			}
  		}
  	}
- 	l2t_release(L2DATA(tdev), e);
+ 	l2t_release(tdev, e);
  }
  
  /*
@@@ -1264,7 -1266,7 +1266,7 @@@ int cxgb3_offload_activate(struct adapt
  		goto out_free;
  
  	err = -ENOMEM;
- 	L2DATA(dev) = t3_init_l2t(l2t_capacity);
+ 	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
  	if (!L2DATA(dev))
  		goto out_free;
  
@@@ -1298,16 -1300,24 +1300,24 @@@
  
  out_free_l2t:
  	t3_free_l2t(L2DATA(dev));
- 	L2DATA(dev) = NULL;
+ 	rcu_assign_pointer(dev->l2opt, NULL);
  out_free:
  	kfree(t);
  	return err;
  }
  
+ static void clean_l2_data(struct rcu_head *head)
+ {
+ 	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+ 	t3_free_l2t(d);
+ }
+ 
  void cxgb3_offload_deactivate(struct adapter *adapter)
  {
  	struct t3cdev *tdev = &adapter->tdev;
  	struct t3c_data *t = T3C_DATA(tdev);
+ 	struct l2t_data *d;
  
  	remove_adapter(adapter);
  	if (list_empty(&adapter_list))
@@@ -1315,8 -1325,11 +1325,11 @@@
  
  	free_tid_maps(&t->tid_maps);
  	T3C_DATA(tdev) = NULL;
- 	t3_free_l2t(L2DATA(tdev));
- 	L2DATA(tdev) = NULL;
+ 	rcu_read_lock();
+ 	d = L2DATA(tdev);
+ 	rcu_read_unlock();
+ 	rcu_assign_pointer(tdev->l2opt, NULL);
+ 	call_rcu(&d->rcu_head, clean_l2_data);
  	if (t->nofail_skb)
  		kfree_skb(t->nofail_skb);
  	kfree(t);
diff --combined drivers/net/ethernet/chelsio/cxgb3/l2t.c
index f452c40,4154097..4154097
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@@ -300,14 -300,21 +300,21 @@@ static inline void reuse_entry(struct l
  struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
  			     struct net_device *dev)
  {
- 	struct l2t_entry *e;
- 	struct l2t_data *d = L2DATA(cdev);
+ 	struct l2t_entry *e = NULL;
+ 	struct l2t_data *d;
+ 	int hash;
  	u32 addr = *(u32 *) neigh->primary_key;
  	int ifidx = neigh->dev->ifindex;
  	struct port_info *p = netdev_priv(dev);
  	int smt_idx = p->port_id;
  
+ 	rcu_read_lock();
+ 	d = L2DATA(cdev);
+ 	if (!d)
+ 		goto done_rcu;
+ 
+ 	hash = arp_hash(addr, ifidx, d);
+ 
  	write_lock_bh(&d->lock);
  	for (e = d->l2tab[hash].first; e; e = e->next)
  		if (e->addr == addr && e->ifindex == ifidx &&
@@@ -338,6 -345,8 +345,8 @@@
  	}
  done:
  	write_unlock_bh(&d->lock);
+ done_rcu:
+ 	rcu_read_unlock();
  	return e;
  }
  
diff --combined drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 7a12d52,c5f5479..c5f5479
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@@ -76,6 -76,7 +76,7 @@@ struct l2t_data 
  	atomic_t nfree;		/* number of free entries */
  	rwlock_t lock;
  	struct l2t_entry l2tab[0];
+ 	struct rcu_head rcu_head;	/* to handle rcu cleanup */
  };
  
  typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@@ -99,7 -100,7 +100,7 @@@ static inline void set_arp_failure_hand
  /*
   * Getting to the L2 data from an offload device.
   */
- #define L2DATA(dev) ((dev)->l2opt)
+ #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
  
  #define W_TCB_L2T_IX    0
  #define S_TCB_L2T_IX    7
@@@ -126,15 -127,22 +127,22 @@@ static inline int l2t_send(struct t3cde
  	return t3_l2t_send_slow(dev, skb, e);
  }
  
- static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+ static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
  {
- 	if (atomic_dec_and_test(&e->refcnt))
+ 	struct l2t_data *d;
+ 
+ 	rcu_read_lock();
+ 	d = L2DATA(t);
+ 
+ 	if (atomic_dec_and_test(&e->refcnt) && d)
  		t3_l2e_free(d, e);
+ 
+ 	rcu_read_unlock();
  }
  
  static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
  {
- 	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
+ 	if (d && atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
  		atomic_dec(&d->nfree);
  }
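
The hunks above convert cxgb3's per-device L2 table pointer (dev->l2opt) to RCU: readers pin the table with rcu_read_lock() and fetch it through rcu_dereference() (the reworked L2DATA() macro), while teardown unpublishes the pointer with rcu_assign_pointer() and defers the actual free past the grace period via call_rcu(). A minimal, self-contained sketch of that pattern follows; the struct and function names (blob, retire, ...) are illustrative, not cxgb3 code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	struct rcu_head rcu_head;
	int payload;
};

static struct blob __rcu *gp;		/* RCU-protected pointer, cf. dev->l2opt */

static int reader(void)
{
	struct blob *b;
	int val = -1;

	rcu_read_lock();		/* readers never block the updater */
	b = rcu_dereference(gp);	/* cf. the new L2DATA() */
	if (b)
		val = b->payload;
	rcu_read_unlock();
	return val;
}

static void blob_free(struct rcu_head *head)
{
	kfree(container_of(head, struct blob, rcu_head));	/* cf. clean_l2_data() */
}

static void retire(void)
{
	struct blob *old = rcu_dereference_protected(gp, 1);

	rcu_assign_pointer(gp, NULL);	/* unpublish first ... */
	if (old)
		call_rcu(&old->rcu_head, blob_free);	/* ... free after a grace period */
}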
  
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 40b395f,0000000..4c8f42a
mode 100644,000000..100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@@ -1,3809 -1,0 +1,3812 @@@
 +/*
 + * This file is part of the Chelsio T4 Ethernet driver for Linux.
 + *
 + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 + *
 + * This software is available to you under a choice of one of two
 + * licenses.  You may choose to be licensed under the terms of the GNU
 + * General Public License (GPL) Version 2, available from the file
 + * COPYING in the main directory of this source tree, or the
 + * OpenIB.org BSD license below:
 + *
 + *     Redistribution and use in source and binary forms, with or
 + *     without modification, are permitted provided that the following
 + *     conditions are met:
 + *
 + *      - Redistributions of source code must retain the above
 + *        copyright notice, this list of conditions and the following
 + *        disclaimer.
 + *
 + *      - Redistributions in binary form must reproduce the above
 + *        copyright notice, this list of conditions and the following
 + *        disclaimer in the documentation and/or other materials
 + *        provided with the distribution.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 + * SOFTWARE.
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/bitmap.h>
 +#include <linux/crc32.h>
 +#include <linux/ctype.h>
 +#include <linux/debugfs.h>
 +#include <linux/err.h>
 +#include <linux/etherdevice.h>
 +#include <linux/firmware.h>
 +#include <linux/if.h>
 +#include <linux/if_vlan.h>
 +#include <linux/init.h>
 +#include <linux/log2.h>
 +#include <linux/mdio.h>
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/mutex.h>
 +#include <linux/netdevice.h>
 +#include <linux/pci.h>
 +#include <linux/aer.h>
 +#include <linux/rtnetlink.h>
 +#include <linux/sched.h>
 +#include <linux/seq_file.h>
 +#include <linux/sockios.h>
 +#include <linux/vmalloc.h>
 +#include <linux/workqueue.h>
 +#include <net/neighbour.h>
 +#include <net/netevent.h>
 +#include <asm/uaccess.h>
 +
 +#include "cxgb4.h"
 +#include "t4_regs.h"
 +#include "t4_msg.h"
 +#include "t4fw_api.h"
 +#include "l2t.h"
 +
 +#define DRV_VERSION "1.3.0-ko"
 +#define DRV_DESC "Chelsio T4 Network Driver"
 +
 +/*
 + * Max interrupt hold-off timer value in us.  Queues fall back to this value
 + * under extreme memory pressure so it's largish to give the system time to
 + * recover.
 + */
 +#define MAX_SGE_TIMERVAL 200U
 +
 +#ifdef CONFIG_PCI_IOV
 +/*
 + * Virtual Function provisioning constants.  We need two extra Ingress Queues
 + * with Interrupt capability to serve as the VF's Firmware Event Queue and
 + * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 + * Lists associated with them.  For each Ethernet/Control Egress Queue and
 + * for each Free List, we need an Egress Context.
 + */
 +enum {
 +	VFRES_NPORTS = 1,		/* # of "ports" per VF */
 +	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
 +
 +	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
 +	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
 +	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs w/ Free List(s)/intr */
 +	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
 +	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
 +	VFRES_TC = 0,			/* PCI-E traffic class */
 +	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
 +
 +	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
 +	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
 +};
 +
 +/*
 + * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 + * static and likely not to be useful in the long run.  We really need to
 + * implement some form of persistent configuration which the firmware
 + * controls.
 + */
 +static unsigned int pfvfres_pmask(struct adapter *adapter,
 +				  unsigned int pf, unsigned int vf)
 +{
 +	unsigned int portn, portvec;
 +
 +	/*
 +	 * Give PF's access to all of the ports.
 +	 */
 +	if (vf == 0)
 +		return FW_PFVF_CMD_PMASK_MASK;
 +
 +	/*
 +	 * For VFs, we'll assign them access to the ports based purely on the
 +	 * PF.  We assign active ports in order, wrapping around if there are
 +	 * fewer active ports than PFs: e.g. active port[pf % nports].
 +	 * Unfortunately the adapter's port_info structs haven't been
 +	 * initialized yet so we have to compute this.
 +	 */
 +	if (adapter->params.nports == 0)
 +		return 0;
 +
 +	portn = pf % adapter->params.nports;
 +	portvec = adapter->params.portvec;
 +	for (;;) {
 +		/*
 +		 * Isolate the lowest set bit in the port vector.  If we're at
 +		 * the port number that we want, return that as the pmask.
 +		 * Otherwise mask that bit out of the port vector and
 +		 * decrement our port number ...
 +		 */
 +		unsigned int pmask = portvec ^ (portvec & (portvec-1));
 +		if (portn == 0)
 +			return pmask;
 +		portn--;
 +		portvec &= ~pmask;
 +	}
 +	/*NOTREACHED*/
 +}
 +#endif
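
The loop body in pfvfres_pmask() relies on a classic trick: for non-zero v, v & (v - 1) clears the lowest set bit, so v ^ (v & (v - 1)) isolates that bit (equivalently, v & -v in two's complement). A stand-alone userspace demo with made-up values, just to make the walk concrete:

#include <stdio.h>

int main(void)
{
	unsigned int portvec = 0xa;	/* ports 1 and 3 active: binary 1010 */
	unsigned int portn = 1;		/* want the second active port */

	for (;;) {
		/* isolate the lowest set bit, as in pfvfres_pmask() */
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0) {
			printf("pmask = 0x%x\n", pmask);	/* prints 0x8 */
			return 0;
		}
		portn--;
		portvec &= ~pmask;
	}
}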
 +
 +enum {
 +	MEMWIN0_APERTURE = 65536,
 +	MEMWIN0_BASE     = 0x30000,
 +	MEMWIN1_APERTURE = 32768,
 +	MEMWIN1_BASE     = 0x28000,
 +	MEMWIN2_APERTURE = 2048,
 +	MEMWIN2_BASE     = 0x1b800,
 +};
 +
 +enum {
 +	MAX_TXQ_ENTRIES      = 16384,
 +	MAX_CTRL_TXQ_ENTRIES = 1024,
 +	MAX_RSPQ_ENTRIES     = 16384,
 +	MAX_RX_BUFFERS       = 16384,
 +	MIN_TXQ_ENTRIES      = 32,
 +	MIN_CTRL_TXQ_ENTRIES = 32,
 +	MIN_RSPQ_ENTRIES     = 128,
 +	MIN_FL_ENTRIES       = 16
 +};
 +
 +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
 +			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
 +			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 +
 +#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
 +
 +static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 +	CH_DEVICE(0xa000, 0),  /* PE10K */
 +	CH_DEVICE(0x4001, -1),
 +	CH_DEVICE(0x4002, -1),
 +	CH_DEVICE(0x4003, -1),
 +	CH_DEVICE(0x4004, -1),
 +	CH_DEVICE(0x4005, -1),
 +	CH_DEVICE(0x4006, -1),
 +	CH_DEVICE(0x4007, -1),
 +	CH_DEVICE(0x4008, -1),
 +	CH_DEVICE(0x4009, -1),
 +	CH_DEVICE(0x400a, -1),
 +	CH_DEVICE(0x4401, 4),
 +	CH_DEVICE(0x4402, 4),
 +	CH_DEVICE(0x4403, 4),
 +	CH_DEVICE(0x4404, 4),
 +	CH_DEVICE(0x4405, 4),
 +	CH_DEVICE(0x4406, 4),
 +	CH_DEVICE(0x4407, 4),
 +	CH_DEVICE(0x4408, 4),
 +	CH_DEVICE(0x4409, 4),
 +	CH_DEVICE(0x440a, 4),
 +	{ 0, }
 +};
 +
 +#define FW_FNAME "cxgb4/t4fw.bin"
 +
 +MODULE_DESCRIPTION(DRV_DESC);
 +MODULE_AUTHOR("Chelsio Communications");
 +MODULE_LICENSE("Dual BSD/GPL");
 +MODULE_VERSION(DRV_VERSION);
 +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 +MODULE_FIRMWARE(FW_FNAME);
 +
 +static int dflt_msg_enable = DFLT_MSG_ENABLE;
 +
 +module_param(dflt_msg_enable, int, 0644);
 +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
 +
 +/*
 + * The driver uses the best interrupt scheme available on a platform in the
 + * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 + * of these schemes the driver may consider as follows:
 + *
 + * msi = 2: choose from among all three options
 + * msi = 1: only consider MSI and INTx interrupts
 + * msi = 0: force INTx interrupts
 + */
 +static int msi = 2;
 +
 +module_param(msi, int, 0644);
 +MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
 +
 +/*
 + * Queue interrupt hold-off timer values.  Queues default to the first of these
 + * upon creation.
 + */
 +static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
 +
 +module_param_array(intr_holdoff, uint, NULL, 0644);
 +MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
 +		 "0..4 in microseconds");
 +
 +static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
 +
 +module_param_array(intr_cnt, uint, NULL, 0644);
 +MODULE_PARM_DESC(intr_cnt,
 +		 "thresholds 1..3 for queue interrupt packet counters");
 +
 +static int vf_acls;
 +
 +#ifdef CONFIG_PCI_IOV
 +module_param(vf_acls, bool, 0644);
 +MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
 +
 +static unsigned int num_vf[4];
 +
 +module_param_array(num_vf, uint, NULL, 0644);
 +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 +#endif
 +
 +static struct dentry *cxgb4_debugfs_root;
 +
 +static LIST_HEAD(adapter_list);
 +static DEFINE_MUTEX(uld_mutex);
 +static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
 +static const char *uld_str[] = { "RDMA", "iSCSI" };
 +
 +static void link_report(struct net_device *dev)
 +{
 +	if (!netif_carrier_ok(dev))
 +		netdev_info(dev, "link down\n");
 +	else {
 +		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
 +
 +		const char *s = "10Mbps";
 +		const struct port_info *p = netdev_priv(dev);
 +
 +		switch (p->link_cfg.speed) {
 +		case SPEED_10000:
 +			s = "10Gbps";
 +			break;
 +		case SPEED_1000:
 +			s = "1000Mbps";
 +			break;
 +		case SPEED_100:
 +			s = "100Mbps";
 +			break;
 +		}
 +
 +		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
 +			    fc[p->link_cfg.fc]);
 +	}
 +}
 +
 +void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 +{
 +	struct net_device *dev = adapter->port[port_id];
 +
 +	/* Skip changes from disabled ports. */
 +	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
 +		if (link_stat)
 +			netif_carrier_on(dev);
 +		else
 +			netif_carrier_off(dev);
 +
 +		link_report(dev);
 +	}
 +}
 +
 +void t4_os_portmod_changed(const struct adapter *adap, int port_id)
 +{
 +	static const char *mod_str[] = {
 +		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
 +	};
 +
 +	const struct net_device *dev = adap->port[port_id];
 +	const struct port_info *pi = netdev_priv(dev);
 +
 +	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
 +		netdev_info(dev, "port module unplugged\n");
 +	else if (pi->mod_type < ARRAY_SIZE(mod_str))
 +		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
 +}
 +
 +/*
 + * Configure the exact and hash address filters to handle a port's multicast
 + * and secondary unicast MAC addresses.
 + */
 +static int set_addr_filters(const struct net_device *dev, bool sleep)
 +{
 +	u64 mhash = 0;
 +	u64 uhash = 0;
 +	bool free = true;
 +	u16 filt_idx[7];
 +	const u8 *addr[7];
 +	int ret, naddr = 0;
 +	const struct netdev_hw_addr *ha;
 +	int uc_cnt = netdev_uc_count(dev);
 +	int mc_cnt = netdev_mc_count(dev);
 +	const struct port_info *pi = netdev_priv(dev);
 +	unsigned int mb = pi->adapter->fn;
 +
 +	/* first do the secondary unicast addresses */
 +	netdev_for_each_uc_addr(ha, dev) {
 +		addr[naddr++] = ha->addr;
 +		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 +			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
 +					naddr, addr, filt_idx, &uhash, sleep);
 +			if (ret < 0)
 +				return ret;
 +
 +			free = false;
 +			naddr = 0;
 +		}
 +	}
 +
 +	/* next set up the multicast addresses */
 +	netdev_for_each_mc_addr(ha, dev) {
 +		addr[naddr++] = ha->addr;
 +		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 +			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
 +					naddr, addr, filt_idx, &mhash, sleep);
 +			if (ret < 0)
 +				return ret;
 +
 +			free = false;
 +			naddr = 0;
 +		}
 +	}
 +
 +	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
 +				uhash | mhash, sleep);
 +}
 +
 +/*
 + * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 + * If @mtu is -1 it is left unchanged.
 + */
 +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 +{
 +	int ret;
 +	struct port_info *pi = netdev_priv(dev);
 +
 +	ret = set_addr_filters(dev, sleep_ok);
 +	if (ret == 0)
 +		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
 +				    (dev->flags & IFF_PROMISC) ? 1 : 0,
 +				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 +				    sleep_ok);
 +	return ret;
 +}
 +
 +/**
 + *	link_start - enable a port
 + *	@dev: the port to enable
 + *
 + *	Performs the MAC and PHY actions needed to enable a port.
 + */
 +static int link_start(struct net_device *dev)
 +{
 +	int ret;
 +	struct port_info *pi = netdev_priv(dev);
 +	unsigned int mb = pi->adapter->fn;
 +
 +	/*
 +	 * We do not set address filters and promiscuity here, the stack does
 +	 * that step explicitly.
 +	 */
 +	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
 +			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
 +	if (ret == 0) {
 +		ret = t4_change_mac(pi->adapter, mb, pi->viid,
 +				    pi->xact_addr_filt, dev->dev_addr, true,
 +				    true);
 +		if (ret >= 0) {
 +			pi->xact_addr_filt = ret;
 +			ret = 0;
 +		}
 +	}
 +	if (ret == 0)
 +		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
 +				    &pi->link_cfg);
 +	if (ret == 0)
 +		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
 +	return ret;
 +}
 +
 +/*
 + * Response queue handler for the FW event queue.
 + */
 +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 +			  const struct pkt_gl *gl)
 +{
 +	u8 opcode = ((const struct rss_header *)rsp)->opcode;
 +
 +	rsp++;                                          /* skip RSS header */
 +	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
 +		const struct cpl_sge_egr_update *p = (void *)rsp;
 +		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
 +		struct sge_txq *txq;
 +
 +		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 +		txq->restarts++;
 +		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
 +			struct sge_eth_txq *eq;
 +
 +			eq = container_of(txq, struct sge_eth_txq, q);
 +			netif_tx_wake_queue(eq->txq);
 +		} else {
 +			struct sge_ofld_txq *oq;
 +
 +			oq = container_of(txq, struct sge_ofld_txq, q);
 +			tasklet_schedule(&oq->qresume_tsk);
 +		}
 +	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
 +		const struct cpl_fw6_msg *p = (void *)rsp;
 +
 +		if (p->type == 0)
 +			t4_handle_fw_rpl(q->adap, p->data);
 +	} else if (opcode == CPL_L2T_WRITE_RPL) {
 +		const struct cpl_l2t_write_rpl *p = (void *)rsp;
 +
 +		do_l2t_write_rpl(q->adap, p);
 +	} else
 +		dev_err(q->adap->pdev_dev,
 +			"unexpected CPL %#x on FW event queue\n", opcode);
 +	return 0;
 +}
 +
 +/**
 + *	uldrx_handler - response queue handler for ULD queues
 + *	@q: the response queue that received the packet
 + *	@rsp: the response queue descriptor holding the offload message
 + *	@gl: the gather list of packet fragments
 + *
 + *	Deliver an ingress offload packet to a ULD.  All processing is done by
 + *	the ULD, we just maintain statistics.
 + */
 +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 +			 const struct pkt_gl *gl)
 +{
 +	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
 +
 +	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
 +		rxq->stats.nomem++;
 +		return -1;
 +	}
 +	if (gl == NULL)
 +		rxq->stats.imm++;
 +	else if (gl == CXGB4_MSG_AN)
 +		rxq->stats.an++;
 +	else
 +		rxq->stats.pkts++;
 +	return 0;
 +}
 +
 +static void disable_msi(struct adapter *adapter)
 +{
 +	if (adapter->flags & USING_MSIX) {
 +		pci_disable_msix(adapter->pdev);
 +		adapter->flags &= ~USING_MSIX;
 +	} else if (adapter->flags & USING_MSI) {
 +		pci_disable_msi(adapter->pdev);
 +		adapter->flags &= ~USING_MSI;
 +	}
 +}
 +
 +/*
 + * Interrupt handler for non-data events used with MSI-X.
 + */
 +static irqreturn_t t4_nondata_intr(int irq, void *cookie)
 +{
 +	struct adapter *adap = cookie;
 +
 +	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
 +	if (v & PFSW) {
 +		adap->swintr = 1;
 +		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
 +	}
 +	t4_slow_intr_handler(adap);
 +	return IRQ_HANDLED;
 +}
 +
 +/*
 + * Name the MSI-X interrupts.
 + */
 +static void name_msix_vecs(struct adapter *adap)
 +{
 +	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
 +
 +	/* non-data interrupts */
 +	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
 +
 +	/* FW events */
 +	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
 +		 adap->port[0]->name);
 +
 +	/* Ethernet queues */
 +	for_each_port(adap, j) {
 +		struct net_device *d = adap->port[j];
 +		const struct port_info *pi = netdev_priv(d);
 +
 +		for (i = 0; i < pi->nqsets; i++, msi_idx++)
 +			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
 +				 d->name, i);
 +	}
 +
 +	/* offload queues */
 +	for_each_ofldrxq(&adap->sge, i)
 +		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
 +			 adap->port[0]->name, i);
 +
 +	for_each_rdmarxq(&adap->sge, i)
 +		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 +			 adap->port[0]->name, i);
 +}
 +
 +static int request_msix_queue_irqs(struct adapter *adap)
 +{
 +	struct sge *s = &adap->sge;
 +	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
 +
 +	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
 +			  adap->msix_info[1].desc, &s->fw_evtq);
 +	if (err)
 +		return err;
 +
 +	for_each_ethrxq(s, ethqidx) {
 +		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
 +				  adap->msix_info[msi].desc,
 +				  &s->ethrxq[ethqidx].rspq);
 +		if (err)
 +			goto unwind;
 +		msi++;
 +	}
 +	for_each_ofldrxq(s, ofldqidx) {
 +		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
 +				  adap->msix_info[msi].desc,
 +				  &s->ofldrxq[ofldqidx].rspq);
 +		if (err)
 +			goto unwind;
 +		msi++;
 +	}
 +	for_each_rdmarxq(s, rdmaqidx) {
 +		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
 +				  adap->msix_info[msi].desc,
 +				  &s->rdmarxq[rdmaqidx].rspq);
 +		if (err)
 +			goto unwind;
 +		msi++;
 +	}
 +	return 0;
 +
 +unwind:
 +	while (--rdmaqidx >= 0)
 +		free_irq(adap->msix_info[--msi].vec,
 +			 &s->rdmarxq[rdmaqidx].rspq);
 +	while (--ofldqidx >= 0)
 +		free_irq(adap->msix_info[--msi].vec,
 +			 &s->ofldrxq[ofldqidx].rspq);
 +	while (--ethqidx >= 0)
 +		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
 +	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
 +	return err;
 +}
 +
 +static void free_msix_queue_irqs(struct adapter *adap)
 +{
 +	int i, msi = 2;
 +	struct sge *s = &adap->sge;
 +
 +	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
 +	for_each_ethrxq(s, i)
 +		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
 +	for_each_ofldrxq(s, i)
 +		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
 +	for_each_rdmarxq(s, i)
 +		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
 +}
 +
 +/**
 + *	write_rss - write the RSS table for a given port
 + *	@pi: the port
 + *	@queues: array of queue indices for RSS
 + *
 + *	Sets up the portion of the HW RSS table for the port's VI to distribute
 + *	packets to the Rx queues in @queues.
 + */
 +static int write_rss(const struct port_info *pi, const u16 *queues)
 +{
 +	u16 *rss;
 +	int i, err;
 +	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
 +
 +	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
 +	if (!rss)
 +		return -ENOMEM;
 +
 +	/* map the queue indices to queue ids */
 +	for (i = 0; i < pi->rss_size; i++, queues++)
 +		rss[i] = q[*queues].rspq.abs_id;
 +
 +	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
 +				  pi->rss_size, rss, pi->rss_size);
 +	kfree(rss);
 +	return err;
 +}
 +
 +/**
 + *	setup_rss - configure RSS
 + *	@adap: the adapter
 + *
 + *	Sets up RSS for each port.
 + */
 +static int setup_rss(struct adapter *adap)
 +{
 +	int i, err;
 +
 +	for_each_port(adap, i) {
 +		const struct port_info *pi = adap2pinfo(adap, i);
 +
 +		err = write_rss(pi, pi->rss);
 +		if (err)
 +			return err;
 +	}
 +	return 0;
 +}
 +
 +/*
 + * Return the channel of the ingress queue with the given qid.
 + */
 +static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
 +{
 +	qid -= p->ingr_start;
 +	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
 +}
 +
 +/*
 + * Wait until all NAPI handlers are descheduled.
 + */
 +static void quiesce_rx(struct adapter *adap)
 +{
 +	int i;
 +
 +	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
 +		struct sge_rspq *q = adap->sge.ingr_map[i];
 +
 +		if (q && q->handler)
 +			napi_disable(&q->napi);
 +	}
 +}
 +
 +/*
 + * Enable NAPI scheduling and interrupt generation for all Rx queues.
 + */
 +static void enable_rx(struct adapter *adap)
 +{
 +	int i;
 +
 +	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
 +		struct sge_rspq *q = adap->sge.ingr_map[i];
 +
 +		if (!q)
 +			continue;
 +		if (q->handler)
 +			napi_enable(&q->napi);
 +		/* 0-increment GTS to start the timer and enable interrupts */
 +		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
 +			     SEINTARM(q->intr_params) |
 +			     INGRESSQID(q->cntxt_id));
 +	}
 +}
 +
 +/**
 + *	setup_sge_queues - configure SGE Tx/Rx/response queues
 + *	@adap: the adapter
 + *
 + *	Determines how many sets of SGE queues to use and initializes them.
 + *	We support multiple queue sets per port if we have MSI-X, otherwise
 + *	just one queue set per port.
 + */
 +static int setup_sge_queues(struct adapter *adap)
 +{
 +	int err, msi_idx, i, j;
 +	struct sge *s = &adap->sge;
 +
 +	bitmap_zero(s->starving_fl, MAX_EGRQ);
 +	bitmap_zero(s->txq_maperr, MAX_EGRQ);
 +
 +	if (adap->flags & USING_MSIX)
 +		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
 +	else {
 +		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
 +				       NULL, NULL);
 +		if (err)
 +			return err;
 +		msi_idx = -((int)s->intrq.abs_id + 1);
 +	}
 +
 +	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
 +			       msi_idx, NULL, fwevtq_handler);
 +	if (err) {
 +freeout:	t4_free_sge_resources(adap);
 +		return err;
 +	}
 +
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		struct port_info *pi = netdev_priv(dev);
 +		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
 +		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
 +
 +		for (j = 0; j < pi->nqsets; j++, q++) {
 +			if (msi_idx > 0)
 +				msi_idx++;
 +			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
 +					       msi_idx, &q->fl,
 +					       t4_ethrx_handler);
 +			if (err)
 +				goto freeout;
 +			q->rspq.idx = j;
 +			memset(&q->stats, 0, sizeof(q->stats));
 +		}
 +		for (j = 0; j < pi->nqsets; j++, t++) {
 +			err = t4_sge_alloc_eth_txq(adap, t, dev,
 +					netdev_get_tx_queue(dev, j),
 +					s->fw_evtq.cntxt_id);
 +			if (err)
 +				goto freeout;
 +		}
 +	}
 +
 +	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
 +	for_each_ofldrxq(s, i) {
 +		struct sge_ofld_rxq *q = &s->ofldrxq[i];
 +		struct net_device *dev = adap->port[i / j];
 +
 +		if (msi_idx > 0)
 +			msi_idx++;
 +		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
 +				       &q->fl, uldrx_handler);
 +		if (err)
 +			goto freeout;
 +		memset(&q->stats, 0, sizeof(q->stats));
 +		s->ofld_rxq[i] = q->rspq.abs_id;
 +		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
 +					    s->fw_evtq.cntxt_id);
 +		if (err)
 +			goto freeout;
 +	}
 +
 +	for_each_rdmarxq(s, i) {
 +		struct sge_ofld_rxq *q = &s->rdmarxq[i];
 +
 +		if (msi_idx > 0)
 +			msi_idx++;
 +		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
 +				       msi_idx, &q->fl, uldrx_handler);
 +		if (err)
 +			goto freeout;
 +		memset(&q->stats, 0, sizeof(q->stats));
 +		s->rdma_rxq[i] = q->rspq.abs_id;
 +	}
 +
 +	for_each_port(adap, i) {
 +		/*
 +		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
 +		 * have RDMA queues, and that's the right value.
 +		 */
 +		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
 +					    s->fw_evtq.cntxt_id,
 +					    s->rdmarxq[i].rspq.cntxt_id);
 +		if (err)
 +			goto freeout;
 +	}
 +
 +	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
 +		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
 +		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
 +	return 0;
 +}
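
Note how setup_sge_queues() overloads the sign of msi_idx: a positive value is the next MSI-X vector to assign, while the negative value built from intrq.abs_id presumably signals to the queue-allocation code that events should be forwarded through the interrupt queue instead. Under that encoding the absolute id is recoverable as -msi_idx - 1; a one-liner for illustration (fwd_intr_iq is a hypothetical helper, not driver code):

static inline unsigned int fwd_intr_iq(int msi_idx)
{
	/* inverse of msi_idx = -((int)s->intrq.abs_id + 1) above */
	return (unsigned int)(-msi_idx - 1);
}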
 +
 +/*
 + * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 + * started but failed, and a negative errno if flash load couldn't start.
 + */
 +static int upgrade_fw(struct adapter *adap)
 +{
 +	int ret;
 +	u32 vers;
 +	const struct fw_hdr *hdr;
 +	const struct firmware *fw;
 +	struct device *dev = adap->pdev_dev;
 +
 +	ret = request_firmware(&fw, FW_FNAME, dev);
 +	if (ret < 0) {
 +		dev_err(dev, "unable to load firmware image " FW_FNAME
 +			", error %d\n", ret);
 +		return ret;
 +	}
 +
 +	hdr = (const struct fw_hdr *)fw->data;
 +	vers = ntohl(hdr->fw_ver);
 +	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
 +		ret = -EINVAL;              /* wrong major version, won't do */
 +		goto out;
 +	}
 +
 +	/*
 +	 * If the flash FW is unusable or we found something newer, load it.
 +	 */
 +	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
 +	    vers > adap->params.fw_vers) {
 +		ret = -t4_load_fw(adap, fw->data, fw->size);
 +		if (!ret)
 +			dev_info(dev, "firmware upgraded to version %pI4 from "
 +				 FW_FNAME "\n", &hdr->fw_ver);
 +	}
 +out:	release_firmware(fw);
 +	return ret;
 +}
 +
 +/*
 + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 + * The allocated memory is cleared.
 + */
 +void *t4_alloc_mem(size_t size)
 +{
 +	void *p = kzalloc(size, GFP_KERNEL);
 +
 +	if (!p)
 +		p = vzalloc(size);
 +	return p;
 +}
 +
 +/*
 + * Free memory allocated through alloc_mem().
 + */
 +static void t4_free_mem(void *addr)
 +{
 +	if (is_vmalloc_addr(addr))
 +		vfree(addr);
 +	else
 +		kfree(addr);
 +}
 +
 +static inline int is_offload(const struct adapter *adap)
 +{
 +	return adap->params.offload;
 +}
 +
 +/*
 + * Implementation of ethtool operations.
 + */
 +
 +static u32 get_msglevel(struct net_device *dev)
 +{
 +	return netdev2adap(dev)->msg_enable;
 +}
 +
 +static void set_msglevel(struct net_device *dev, u32 val)
 +{
 +	netdev2adap(dev)->msg_enable = val;
 +}
 +
 +static char stats_strings[][ETH_GSTRING_LEN] = {
 +	"TxOctetsOK         ",
 +	"TxFramesOK         ",
 +	"TxBroadcastFrames  ",
 +	"TxMulticastFrames  ",
 +	"TxUnicastFrames    ",
 +	"TxErrorFrames      ",
 +
 +	"TxFrames64         ",
 +	"TxFrames65To127    ",
 +	"TxFrames128To255   ",
 +	"TxFrames256To511   ",
 +	"TxFrames512To1023  ",
 +	"TxFrames1024To1518 ",
 +	"TxFrames1519ToMax  ",
 +
 +	"TxFramesDropped    ",
 +	"TxPauseFrames      ",
 +	"TxPPP0Frames       ",
 +	"TxPPP1Frames       ",
 +	"TxPPP2Frames       ",
 +	"TxPPP3Frames       ",
 +	"TxPPP4Frames       ",
 +	"TxPPP5Frames       ",
 +	"TxPPP6Frames       ",
 +	"TxPPP7Frames       ",
 +
 +	"RxOctetsOK         ",
 +	"RxFramesOK         ",
 +	"RxBroadcastFrames  ",
 +	"RxMulticastFrames  ",
 +	"RxUnicastFrames    ",
 +
 +	"RxFramesTooLong    ",
 +	"RxJabberErrors     ",
 +	"RxFCSErrors        ",
 +	"RxLengthErrors     ",
 +	"RxSymbolErrors     ",
 +	"RxRuntFrames       ",
 +
 +	"RxFrames64         ",
 +	"RxFrames65To127    ",
 +	"RxFrames128To255   ",
 +	"RxFrames256To511   ",
 +	"RxFrames512To1023  ",
 +	"RxFrames1024To1518 ",
 +	"RxFrames1519ToMax  ",
 +
 +	"RxPauseFrames      ",
 +	"RxPPP0Frames       ",
 +	"RxPPP1Frames       ",
 +	"RxPPP2Frames       ",
 +	"RxPPP3Frames       ",
 +	"RxPPP4Frames       ",
 +	"RxPPP5Frames       ",
 +	"RxPPP6Frames       ",
 +	"RxPPP7Frames       ",
 +
 +	"RxBG0FramesDropped ",
 +	"RxBG1FramesDropped ",
 +	"RxBG2FramesDropped ",
 +	"RxBG3FramesDropped ",
 +	"RxBG0FramesTrunc   ",
 +	"RxBG1FramesTrunc   ",
 +	"RxBG2FramesTrunc   ",
 +	"RxBG3FramesTrunc   ",
 +
 +	"TSO                ",
 +	"TxCsumOffload      ",
 +	"RxCsumGood         ",
 +	"VLANextractions    ",
 +	"VLANinsertions     ",
 +	"GROpackets         ",
 +	"GROmerged          ",
 +};
 +
 +static int get_sset_count(struct net_device *dev, int sset)
 +{
 +	switch (sset) {
 +	case ETH_SS_STATS:
 +		return ARRAY_SIZE(stats_strings);
 +	default:
 +		return -EOPNOTSUPP;
 +	}
 +}
 +
 +#define T4_REGMAP_SIZE (160 * 1024)
 +
 +static int get_regs_len(struct net_device *dev)
 +{
 +	return T4_REGMAP_SIZE;
 +}
 +
 +static int get_eeprom_len(struct net_device *dev)
 +{
 +	return EEPROMSIZE;
 +}
 +
 +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 +{
 +	struct adapter *adapter = netdev2adap(dev);
 +
 +	strcpy(info->driver, KBUILD_MODNAME);
 +	strcpy(info->version, DRV_VERSION);
 +	strcpy(info->bus_info, pci_name(adapter->pdev));
 +
 +	if (!adapter->params.fw_vers)
 +		strcpy(info->fw_version, "N/A");
 +	else
 +		snprintf(info->fw_version, sizeof(info->fw_version),
 +			"%u.%u.%u.%u, TP %u.%u.%u.%u",
 +			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
 +			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
 +			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
 +			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
 +			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
 +			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
 +			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
 +			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
 +}
 +
 +static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 +{
 +	if (stringset == ETH_SS_STATS)
 +		memcpy(data, stats_strings, sizeof(stats_strings));
 +}
 +
 +/*
 + * Port stats maintained per queue of the port.  They should be in the same
 + * order as in stats_strings above.
 + */
 +struct queue_port_stats {
 +	u64 tso;
 +	u64 tx_csum;
 +	u64 rx_csum;
 +	u64 vlan_ex;
 +	u64 vlan_ins;
 +	u64 gro_pkts;
 +	u64 gro_merged;
 +};
 +
 +static void collect_sge_port_stats(const struct adapter *adap,
 +		const struct port_info *p, struct queue_port_stats *s)
 +{
 +	int i;
 +	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
 +	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
 +
 +	memset(s, 0, sizeof(*s));
 +	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
 +		s->tso += tx->tso;
 +		s->tx_csum += tx->tx_cso;
 +		s->rx_csum += rx->stats.rx_cso;
 +		s->vlan_ex += rx->stats.vlan_ex;
 +		s->vlan_ins += tx->vlan_ins;
 +		s->gro_pkts += rx->stats.lro_pkts;
 +		s->gro_merged += rx->stats.lro_merged;
 +	}
 +}
 +
 +static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 +		      u64 *data)
 +{
 +	struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adapter = pi->adapter;
 +
 +	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
 +
 +	data += sizeof(struct port_stats) / sizeof(u64);
 +	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
 +}
 +
 +/*
 + * Return a version number to identify the type of adapter.  The scheme is:
 + * - bits 0..9: chip version
 + * - bits 10..15: chip revision
 + * - bits 16..23: register dump version
 + */
 +static inline unsigned int mk_adap_vers(const struct adapter *ap)
 +{
 +	return 4 | (ap->params.rev << 10) | (1 << 16);
 +}
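
Because mk_adap_vers() packs three fields into one word, a hypothetical decoder makes the layout explicit (decode_adap_vers is illustrative only, not part of the driver):

static inline void decode_adap_vers(unsigned int v, unsigned int *chip,
				    unsigned int *rev, unsigned int *dump)
{
	*chip = v & 0x3ff;		/* bits 0..9:   chip version (4 = T4) */
	*rev  = (v >> 10) & 0x3f;	/* bits 10..15: chip revision */
	*dump = (v >> 16) & 0xff;	/* bits 16..23: register dump version */
}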
 +
 +static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
 +			   unsigned int end)
 +{
 +	u32 *p = buf + start;
 +
 +	for ( ; start <= end; start += sizeof(u32))
 +		*p++ = t4_read_reg(ap, start);
 +}
 +
 +static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 +		     void *buf)
 +{
 +	static const unsigned int reg_ranges[] = {
 +		0x1008, 0x1108,
 +		0x1180, 0x11b4,
 +		0x11fc, 0x123c,
 +		0x1300, 0x173c,
 +		0x1800, 0x18fc,
 +		0x3000, 0x30d8,
 +		0x30e0, 0x5924,
 +		0x5960, 0x59d4,
 +		0x5a00, 0x5af8,
 +		0x6000, 0x6098,
 +		0x6100, 0x6150,
 +		0x6200, 0x6208,
 +		0x6240, 0x6248,
 +		0x6280, 0x6338,
 +		0x6370, 0x638c,
 +		0x6400, 0x643c,
 +		0x6500, 0x6524,
 +		0x6a00, 0x6a38,
 +		0x6a60, 0x6a78,
 +		0x6b00, 0x6b84,
 +		0x6bf0, 0x6c84,
 +		0x6cf0, 0x6d84,
 +		0x6df0, 0x6e84,
 +		0x6ef0, 0x6f84,
 +		0x6ff0, 0x7084,
 +		0x70f0, 0x7184,
 +		0x71f0, 0x7284,
 +		0x72f0, 0x7384,
 +		0x73f0, 0x7450,
 +		0x7500, 0x7530,
 +		0x7600, 0x761c,
 +		0x7680, 0x76cc,
 +		0x7700, 0x7798,
 +		0x77c0, 0x77fc,
 +		0x7900, 0x79fc,
 +		0x7b00, 0x7c38,
 +		0x7d00, 0x7efc,
 +		0x8dc0, 0x8e1c,
 +		0x8e30, 0x8e78,
 +		0x8ea0, 0x8f6c,
 +		0x8fc0, 0x9074,
 +		0x90fc, 0x90fc,
 +		0x9400, 0x9458,
 +		0x9600, 0x96bc,
 +		0x9800, 0x9808,
 +		0x9820, 0x983c,
 +		0x9850, 0x9864,
 +		0x9c00, 0x9c6c,
 +		0x9c80, 0x9cec,
 +		0x9d00, 0x9d6c,
 +		0x9d80, 0x9dec,
 +		0x9e00, 0x9e6c,
 +		0x9e80, 0x9eec,
 +		0x9f00, 0x9f6c,
 +		0x9f80, 0x9fec,
 +		0xd004, 0xd03c,
 +		0xdfc0, 0xdfe0,
 +		0xe000, 0xea7c,
 +		0xf000, 0x11190,
 +		0x19040, 0x1906c,
 +		0x19078, 0x19080,
 +		0x1908c, 0x19124,
 +		0x19150, 0x191b0,
 +		0x191d0, 0x191e8,
 +		0x19238, 0x1924c,
 +		0x193f8, 0x19474,
 +		0x19490, 0x194f8,
 +		0x19800, 0x19f30,
 +		0x1a000, 0x1a06c,
 +		0x1a0b0, 0x1a120,
 +		0x1a128, 0x1a138,
 +		0x1a190, 0x1a1c4,
 +		0x1a1fc, 0x1a1fc,
 +		0x1e040, 0x1e04c,
 +		0x1e284, 0x1e28c,
 +		0x1e2c0, 0x1e2c0,
 +		0x1e2e0, 0x1e2e0,
 +		0x1e300, 0x1e384,
 +		0x1e3c0, 0x1e3c8,
 +		0x1e440, 0x1e44c,
 +		0x1e684, 0x1e68c,
 +		0x1e6c0, 0x1e6c0,
 +		0x1e6e0, 0x1e6e0,
 +		0x1e700, 0x1e784,
 +		0x1e7c0, 0x1e7c8,
 +		0x1e840, 0x1e84c,
 +		0x1ea84, 0x1ea8c,
 +		0x1eac0, 0x1eac0,
 +		0x1eae0, 0x1eae0,
 +		0x1eb00, 0x1eb84,
 +		0x1ebc0, 0x1ebc8,
 +		0x1ec40, 0x1ec4c,
 +		0x1ee84, 0x1ee8c,
 +		0x1eec0, 0x1eec0,
 +		0x1eee0, 0x1eee0,
 +		0x1ef00, 0x1ef84,
 +		0x1efc0, 0x1efc8,
 +		0x1f040, 0x1f04c,
 +		0x1f284, 0x1f28c,
 +		0x1f2c0, 0x1f2c0,
 +		0x1f2e0, 0x1f2e0,
 +		0x1f300, 0x1f384,
 +		0x1f3c0, 0x1f3c8,
 +		0x1f440, 0x1f44c,
 +		0x1f684, 0x1f68c,
 +		0x1f6c0, 0x1f6c0,
 +		0x1f6e0, 0x1f6e0,
 +		0x1f700, 0x1f784,
 +		0x1f7c0, 0x1f7c8,
 +		0x1f840, 0x1f84c,
 +		0x1fa84, 0x1fa8c,
 +		0x1fac0, 0x1fac0,
 +		0x1fae0, 0x1fae0,
 +		0x1fb00, 0x1fb84,
 +		0x1fbc0, 0x1fbc8,
 +		0x1fc40, 0x1fc4c,
 +		0x1fe84, 0x1fe8c,
 +		0x1fec0, 0x1fec0,
 +		0x1fee0, 0x1fee0,
 +		0x1ff00, 0x1ff84,
 +		0x1ffc0, 0x1ffc8,
 +		0x20000, 0x2002c,
 +		0x20100, 0x2013c,
 +		0x20190, 0x201c8,
 +		0x20200, 0x20318,
 +		0x20400, 0x20528,
 +		0x20540, 0x20614,
 +		0x21000, 0x21040,
 +		0x2104c, 0x21060,
 +		0x210c0, 0x210ec,
 +		0x21200, 0x21268,
 +		0x21270, 0x21284,
 +		0x212fc, 0x21388,
 +		0x21400, 0x21404,
 +		0x21500, 0x21518,
 +		0x2152c, 0x2153c,
 +		0x21550, 0x21554,
 +		0x21600, 0x21600,
 +		0x21608, 0x21628,
 +		0x21630, 0x2163c,
 +		0x21700, 0x2171c,
 +		0x21780, 0x2178c,
 +		0x21800, 0x21c38,
 +		0x21c80, 0x21d7c,
 +		0x21e00, 0x21e04,
 +		0x22000, 0x2202c,
 +		0x22100, 0x2213c,
 +		0x22190, 0x221c8,
 +		0x22200, 0x22318,
 +		0x22400, 0x22528,
 +		0x22540, 0x22614,
 +		0x23000, 0x23040,
 +		0x2304c, 0x23060,
 +		0x230c0, 0x230ec,
 +		0x23200, 0x23268,
 +		0x23270, 0x23284,
 +		0x232fc, 0x23388,
 +		0x23400, 0x23404,
 +		0x23500, 0x23518,
 +		0x2352c, 0x2353c,
 +		0x23550, 0x23554,
 +		0x23600, 0x23600,
 +		0x23608, 0x23628,
 +		0x23630, 0x2363c,
 +		0x23700, 0x2371c,
 +		0x23780, 0x2378c,
 +		0x23800, 0x23c38,
 +		0x23c80, 0x23d7c,
 +		0x23e00, 0x23e04,
 +		0x24000, 0x2402c,
 +		0x24100, 0x2413c,
 +		0x24190, 0x241c8,
 +		0x24200, 0x24318,
 +		0x24400, 0x24528,
 +		0x24540, 0x24614,
 +		0x25000, 0x25040,
 +		0x2504c, 0x25060,
 +		0x250c0, 0x250ec,
 +		0x25200, 0x25268,
 +		0x25270, 0x25284,
 +		0x252fc, 0x25388,
 +		0x25400, 0x25404,
 +		0x25500, 0x25518,
 +		0x2552c, 0x2553c,
 +		0x25550, 0x25554,
 +		0x25600, 0x25600,
 +		0x25608, 0x25628,
 +		0x25630, 0x2563c,
 +		0x25700, 0x2571c,
 +		0x25780, 0x2578c,
 +		0x25800, 0x25c38,
 +		0x25c80, 0x25d7c,
 +		0x25e00, 0x25e04,
 +		0x26000, 0x2602c,
 +		0x26100, 0x2613c,
 +		0x26190, 0x261c8,
 +		0x26200, 0x26318,
 +		0x26400, 0x26528,
 +		0x26540, 0x26614,
 +		0x27000, 0x27040,
 +		0x2704c, 0x27060,
 +		0x270c0, 0x270ec,
 +		0x27200, 0x27268,
 +		0x27270, 0x27284,
 +		0x272fc, 0x27388,
 +		0x27400, 0x27404,
 +		0x27500, 0x27518,
 +		0x2752c, 0x2753c,
 +		0x27550, 0x27554,
 +		0x27600, 0x27600,
 +		0x27608, 0x27628,
 +		0x27630, 0x2763c,
 +		0x27700, 0x2771c,
 +		0x27780, 0x2778c,
 +		0x27800, 0x27c38,
 +		0x27c80, 0x27d7c,
 +		0x27e00, 0x27e04
 +	};
 +
 +	int i;
 +	struct adapter *ap = netdev2adap(dev);
 +
 +	regs->version = mk_adap_vers(ap);
 +
 +	memset(buf, 0, T4_REGMAP_SIZE);
 +	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
 +		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
 +}
 +
 +static int restart_autoneg(struct net_device *dev)
 +{
 +	struct port_info *p = netdev_priv(dev);
 +
 +	if (!netif_running(dev))
 +		return -EAGAIN;
 +	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
 +		return -EINVAL;
 +	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
 +	return 0;
 +}
 +
 +static int identify_port(struct net_device *dev,
 +			 enum ethtool_phys_id_state state)
 +{
 +	unsigned int val;
 +	struct adapter *adap = netdev2adap(dev);
 +
 +	if (state == ETHTOOL_ID_ACTIVE)
 +		val = 0xffff;
 +	else if (state == ETHTOOL_ID_INACTIVE)
 +		val = 0;
 +	else
 +		return -EINVAL;
 +
 +	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
 +}
 +
 +static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
 +{
 +	unsigned int v = 0;
 +
 +	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
 +	    type == FW_PORT_TYPE_BT_XAUI) {
 +		v |= SUPPORTED_TP;
 +		if (caps & FW_PORT_CAP_SPEED_100M)
 +			v |= SUPPORTED_100baseT_Full;
 +		if (caps & FW_PORT_CAP_SPEED_1G)
 +			v |= SUPPORTED_1000baseT_Full;
 +		if (caps & FW_PORT_CAP_SPEED_10G)
 +			v |= SUPPORTED_10000baseT_Full;
 +	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
 +		v |= SUPPORTED_Backplane;
 +		if (caps & FW_PORT_CAP_SPEED_1G)
 +			v |= SUPPORTED_1000baseKX_Full;
 +		if (caps & FW_PORT_CAP_SPEED_10G)
 +			v |= SUPPORTED_10000baseKX4_Full;
 +	} else if (type == FW_PORT_TYPE_KR)
 +		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
 +	else if (type == FW_PORT_TYPE_BP_AP)
 +		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
 +		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
 +	else if (type == FW_PORT_TYPE_BP4_AP)
 +		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
 +		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
 +		     SUPPORTED_10000baseKX4_Full;
 +	else if (type == FW_PORT_TYPE_FIBER_XFI ||
 +		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
 +		v |= SUPPORTED_FIBRE;
 +
 +	if (caps & FW_PORT_CAP_ANEG)
 +		v |= SUPPORTED_Autoneg;
 +	return v;
 +}
 +
 +static unsigned int to_fw_linkcaps(unsigned int caps)
 +{
 +	unsigned int v = 0;
 +
 +	if (caps & ADVERTISED_100baseT_Full)
 +		v |= FW_PORT_CAP_SPEED_100M;
 +	if (caps & ADVERTISED_1000baseT_Full)
 +		v |= FW_PORT_CAP_SPEED_1G;
 +	if (caps & ADVERTISED_10000baseT_Full)
 +		v |= FW_PORT_CAP_SPEED_10G;
 +	return v;
 +}
 +
 +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +	const struct port_info *p = netdev_priv(dev);
 +
 +	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
 +	    p->port_type == FW_PORT_TYPE_BT_XFI ||
 +	    p->port_type == FW_PORT_TYPE_BT_XAUI)
 +		cmd->port = PORT_TP;
 +	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
 +		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
 +		cmd->port = PORT_FIBRE;
 +	else if (p->port_type == FW_PORT_TYPE_SFP) {
 +		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
 +		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
 +			cmd->port = PORT_DA;
 +		else
 +			cmd->port = PORT_FIBRE;
 +	} else
 +		cmd->port = PORT_OTHER;
 +
 +	if (p->mdio_addr >= 0) {
 +		cmd->phy_address = p->mdio_addr;
 +		cmd->transceiver = XCVR_EXTERNAL;
 +		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
 +			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
 +	} else {
 +		cmd->phy_address = 0;  /* not really, but no better option */
 +		cmd->transceiver = XCVR_INTERNAL;
 +		cmd->mdio_support = 0;
 +	}
 +
 +	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
 +	cmd->advertising = from_fw_linkcaps(p->port_type,
 +					    p->link_cfg.advertising);
 +	ethtool_cmd_speed_set(cmd,
 +			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
 +	cmd->duplex = DUPLEX_FULL;
 +	cmd->autoneg = p->link_cfg.autoneg;
 +	cmd->maxtxpkt = 0;
 +	cmd->maxrxpkt = 0;
 +	return 0;
 +}
 +
 +static unsigned int speed_to_caps(int speed)
 +{
 +	if (speed == SPEED_100)
 +		return FW_PORT_CAP_SPEED_100M;
 +	if (speed == SPEED_1000)
 +		return FW_PORT_CAP_SPEED_1G;
 +	if (speed == SPEED_10000)
 +		return FW_PORT_CAP_SPEED_10G;
 +	return 0;
 +}
 +
 +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +	unsigned int cap;
 +	struct port_info *p = netdev_priv(dev);
 +	struct link_config *lc = &p->link_cfg;
 +	u32 speed = ethtool_cmd_speed(cmd);
 +
 +	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
 +		return -EINVAL;
 +
 +	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
 +		/*
 +		 * PHY offers a single speed.  See if that's what's
 +		 * being requested.
 +		 */
 +		if (cmd->autoneg == AUTONEG_DISABLE &&
 +		    (lc->supported & speed_to_caps(speed)))
 +			return 0;
 +		return -EINVAL;
 +	}
 +
 +	if (cmd->autoneg == AUTONEG_DISABLE) {
 +		cap = speed_to_caps(speed);
 +
 +		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
 +		    (speed == SPEED_10000))
 +			return -EINVAL;
 +		lc->requested_speed = cap;
 +		lc->advertising = 0;
 +	} else {
 +		cap = to_fw_linkcaps(cmd->advertising);
 +		if (!(lc->supported & cap))
 +			return -EINVAL;
 +		lc->requested_speed = 0;
 +		lc->advertising = cap | FW_PORT_CAP_ANEG;
 +	}
 +	lc->autoneg = cmd->autoneg;
 +
 +	if (netif_running(dev))
 +		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
 +				     lc);
 +	return 0;
 +}
 +
 +static void get_pauseparam(struct net_device *dev,
 +			   struct ethtool_pauseparam *epause)
 +{
 +	struct port_info *p = netdev_priv(dev);
 +
 +	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
 +	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
 +	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
 +}
 +
 +static int set_pauseparam(struct net_device *dev,
 +			  struct ethtool_pauseparam *epause)
 +{
 +	struct port_info *p = netdev_priv(dev);
 +	struct link_config *lc = &p->link_cfg;
 +
 +	if (epause->autoneg == AUTONEG_DISABLE)
 +		lc->requested_fc = 0;
 +	else if (lc->supported & FW_PORT_CAP_ANEG)
 +		lc->requested_fc = PAUSE_AUTONEG;
 +	else
 +		return -EINVAL;
 +
 +	if (epause->rx_pause)
 +		lc->requested_fc |= PAUSE_RX;
 +	if (epause->tx_pause)
 +		lc->requested_fc |= PAUSE_TX;
 +	if (netif_running(dev))
 +		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
 +				     lc);
 +	return 0;
 +}
 +
 +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +	const struct sge *s = &pi->adapter->sge;
 +
 +	e->rx_max_pending = MAX_RX_BUFFERS;
 +	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
 +	e->rx_jumbo_max_pending = 0;
 +	e->tx_max_pending = MAX_TXQ_ENTRIES;
 +
 +	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
 +	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
 +	e->rx_jumbo_pending = 0;
 +	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
 +}
 +
 +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 +{
 +	int i;
 +	const struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adapter = pi->adapter;
 +	struct sge *s = &adapter->sge;
 +
 +	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
 +	    e->tx_pending > MAX_TXQ_ENTRIES ||
 +	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
 +	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
 +	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
 +		return -EINVAL;
 +
 +	if (adapter->flags & FULL_INIT_DONE)
 +		return -EBUSY;
 +
 +	for (i = 0; i < pi->nqsets; ++i) {
 +		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
 +		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
 +		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
 +	}
 +	return 0;
 +}
 +
 +static int closest_timer(const struct sge *s, int time)
 +{
 +	int i, delta, match = 0, min_delta = INT_MAX;
 +
 +	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
 +		delta = time - s->timer_val[i];
 +		if (delta < 0)
 +			delta = -delta;
 +		if (delta < min_delta) {
 +			min_delta = delta;
 +			match = i;
 +		}
 +	}
 +	return match;
 +}
 +
 +static int closest_thres(const struct sge *s, int thres)
 +{
 +	int i, delta, match = 0, min_delta = INT_MAX;
 +
 +	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
 +		delta = thres - s->counter_val[i];
 +		if (delta < 0)
 +			delta = -delta;
 +		if (delta < min_delta) {
 +			min_delta = delta;
 +			match = i;
 +		}
 +	}
 +	return match;
 +}
 +
 +/*
 + * Return a queue's interrupt hold-off time in us.  0 means no timer.
 + */
 +static unsigned int qtimer_val(const struct adapter *adap,
 +			       const struct sge_rspq *q)
 +{
 +	unsigned int idx = q->intr_params >> 1;
 +
 +	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
 +}
 +
 +/**
 + *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 + *	@adap: the adapter
 + *	@q: the Rx queue
 + *	@us: the hold-off time in us, or 0 to disable timer
 + *	@cnt: the hold-off packet count, or 0 to disable counter
 + *
 + *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 + *	one of the two needs to be enabled for the queue to generate interrupts.
 + */
 +static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
 +			       unsigned int us, unsigned int cnt)
 +{
 +	if ((us | cnt) == 0)
 +		cnt = 1;
 +
 +	if (cnt) {
 +		int err;
 +		u32 v, new_idx;
 +
 +		new_idx = closest_thres(&adap->sge, cnt);
 +		if (q->desc && q->pktcnt_idx != new_idx) {
 +			/* the queue has already been created, update it */
 +			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
 +			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
 +			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
 +			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
 +					    &new_idx);
 +			if (err)
 +				return err;
 +		}
 +		q->pktcnt_idx = new_idx;
 +	}
 +
 +	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
 +	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
 +	return 0;
 +}
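
The >> 1 in qtimer_val() and the expression building q->intr_params above imply an encoding with the counter-enable flag in bit 0 and the timer index in the bits above it. A sketch under that assumption; the MY_* names are stand-ins for the driver's QINTR_TIMER_IDX()/QINTR_CNT_EN macros:

#define MY_QINTR_CNT_EN		0x1U		/* bit 0: packet counter armed */
#define MY_QINTR_TIMER_IDX(x)	((x) << 1)	/* bits 1..: hold-off timer index */

static inline unsigned int my_timer_idx(unsigned int intr_params)
{
	return intr_params >> 1;	/* matches qtimer_val() above */
}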
 +
 +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adap = pi->adapter;
 +
 +	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
 +			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
 +}
 +
 +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +	const struct adapter *adap = pi->adapter;
 +	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
 +
 +	c->rx_coalesce_usecs = qtimer_val(adap, rq);
 +	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
 +		adap->sge.counter_val[rq->pktcnt_idx] : 0;
 +	return 0;
 +}
 +
 +/**
 + *	eeprom_ptov - translate a physical EEPROM address to virtual
 + *	@phys_addr: the physical EEPROM address
 + *	@fn: the PCI function number
 + *	@sz: size of function-specific area
 + *
 + *	Translate a physical EEPROM address to virtual.  The first 1K is
 + *	accessed through virtual addresses starting at 31K; the rest is
 + *	accessed through virtual addresses starting at 0.
 + *
 + *	The mapping is as follows:
 + *	[0..1K) -> [31K..32K)
 + *	[1K..1K+A) -> [31K-A..31K)
 + *	[1K+A..ES) -> [0..ES-A-1K)
 + *
 + *	where A = @fn * @sz, and ES = EEPROM size.
 + */
 +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
 +{
 +	fn *= sz;
 +	if (phys_addr < 1024)
 +		return phys_addr + (31 << 10);
 +	if (phys_addr < 1024 + fn)
 +		return 31744 - fn + phys_addr - 1024;
 +	if (phys_addr < EEPROMSIZE)
 +		return phys_addr - 1024 - fn;
 +	return -EINVAL;
 +}
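
A quick worked check of the mapping, assuming fn = 1 and sz = 1024 purely for illustration (the real size is EEPROMPFSIZE, and EEPROMSIZE is taken to be larger than 2K here):

#include <assert.h>

static void eeprom_ptov_example(void)
{
	assert(eeprom_ptov(0, 1, 1024) == 31 * 1024);	/* [0..1K)    -> [31K..32K)   */
	assert(eeprom_ptov(1024, 1, 1024) == 30 * 1024);/* [1K..1K+A) -> [31K-A..31K) */
	assert(eeprom_ptov(2048, 1, 1024) == 0);	/* [1K+A..ES) -> [0..ES-A-1K) */
}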
 +
 +/*
 + * The next two routines implement eeprom read/write from physical addresses.
 + */
 +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 +{
 +	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
 +
 +	if (vaddr >= 0)
 +		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
 +	return vaddr < 0 ? vaddr : 0;
 +}
 +
 +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 +{
 +	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
 +
 +	if (vaddr >= 0)
 +		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
 +	return vaddr < 0 ? vaddr : 0;
 +}
 +
 +#define EEPROM_MAGIC 0x38E2F10C
 +
 +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 +		      u8 *data)
 +{
 +	int i, err = 0;
 +	struct adapter *adapter = netdev2adap(dev);
 +
 +	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
 +	if (!buf)
 +		return -ENOMEM;
 +
 +	e->magic = EEPROM_MAGIC;
 +	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
 +		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
 +
 +	if (!err)
 +		memcpy(data, buf + e->offset, e->len);
 +	kfree(buf);
 +	return err;
 +}
 +
 +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 +		      u8 *data)
 +{
 +	u8 *buf;
 +	int err = 0;
 +	u32 aligned_offset, aligned_len, *p;
 +	struct adapter *adapter = netdev2adap(dev);
 +
 +	if (eeprom->magic != EEPROM_MAGIC)
 +		return -EINVAL;
 +
 +	aligned_offset = eeprom->offset & ~3;
 +	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
 +
 +	if (adapter->fn > 0) {
 +		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
 +
 +		if (aligned_offset < start ||
 +		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
 +			return -EPERM;
 +	}
 +
 +	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
 +		/*
 +		 * RMW possibly needed for first or last words.
 +		 */
 +		buf = kmalloc(aligned_len, GFP_KERNEL);
 +		if (!buf)
 +			return -ENOMEM;
 +		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
 +		if (!err && aligned_len > 4)
 +			err = eeprom_rd_phys(adapter,
 +					     aligned_offset + aligned_len - 4,
 +					     (u32 *)&buf[aligned_len - 4]);
 +		if (err)
 +			goto out;
 +		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
 +	} else
 +		buf = data;
 +
 +	err = t4_seeprom_wp(adapter, false);
 +	if (err)
 +		goto out;
 +
 +	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 +		err = eeprom_wr_phys(adapter, aligned_offset, *p);
 +		aligned_offset += 4;
 +	}
 +
 +	if (!err)
 +		err = t4_seeprom_wp(adapter, true);
 +out:
 +	if (buf != data)
 +		kfree(buf);
 +	return err;
 +}
 +
 +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
 +{
 +	int ret;
 +	const struct firmware *fw;
 +	struct adapter *adap = netdev2adap(netdev);
 +
 +	ef->data[sizeof(ef->data) - 1] = '\0';
 +	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
 +	if (ret < 0)
 +		return ret;
 +
 +	ret = t4_load_fw(adap, fw->data, fw->size);
 +	release_firmware(fw);
 +	if (!ret)
 +		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
 +	return ret;
 +}
 +
 +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
 +#define BCAST_CRC 0xa0ccc1a6
 +
 +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +	wol->supported = WAKE_BCAST | WAKE_MAGIC;
 +	wol->wolopts = netdev2adap(dev)->wol;
 +	memset(&wol->sopass, 0, sizeof(wol->sopass));
 +}
 +
 +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +	int err = 0;
 +	struct port_info *pi = netdev_priv(dev);
 +
 +	if (wol->wolopts & ~WOL_SUPPORTED)
 +		return -EINVAL;
 +	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
 +			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
 +	if (wol->wolopts & WAKE_BCAST) {
 +		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
 +					~0ULL, 0, false);
 +		if (!err)
 +			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
 +						~6ULL, ~0ULL, BCAST_CRC, true);
 +	} else
 +		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
 +	return err;
 +}
 +
 +static int cxgb_set_features(struct net_device *dev, u32 features)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +	u32 changed = dev->features ^ features;
 +	int err;
 +
 +	if (!(changed & NETIF_F_HW_VLAN_RX))
 +		return 0;
 +
 +	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
 +			    -1, -1, -1,
 +			    !!(features & NETIF_F_HW_VLAN_RX), true);
 +	if (unlikely(err))
 +		dev->features = features ^ NETIF_F_HW_VLAN_RX;
 +	return err;
 +}
 +
 +static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +	unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
 +
 +	p->size = pi->rss_size;
 +	while (n--)
 +		p->ring_index[n] = pi->rss[n];
 +	return 0;
 +}
 +
 +static int set_rss_table(struct net_device *dev,
 +			 const struct ethtool_rxfh_indir *p)
 +{
 +	unsigned int i;
 +	struct port_info *pi = netdev_priv(dev);
 +
 +	if (p->size != pi->rss_size)
 +		return -EINVAL;
 +	for (i = 0; i < p->size; i++)
 +		if (p->ring_index[i] >= pi->nqsets)
 +			return -EINVAL;
 +	for (i = 0; i < p->size; i++)
 +		pi->rss[i] = p->ring_index[i];
 +	if (pi->adapter->flags & FULL_INIT_DONE)
 +		return write_rss(pi, pi->rss);
 +	return 0;
 +}
 +
 +static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 +		     u32 *rules)
 +{
 +	const struct port_info *pi = netdev_priv(dev);
 +
 +	switch (info->cmd) {
 +	case ETHTOOL_GRXFH: {
 +		unsigned int v = pi->rss_mode;
 +
 +		info->data = 0;
 +		switch (info->flow_type) {
 +		case TCP_V4_FLOW:
 +			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST |
 +					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		case UDP_V4_FLOW:
 +			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
 +			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
 +				info->data = RXH_IP_SRC | RXH_IP_DST |
 +					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		case SCTP_V4_FLOW:
 +		case AH_ESP_V4_FLOW:
 +		case IPV4_FLOW:
 +			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		case TCP_V6_FLOW:
 +			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST |
 +					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		case UDP_V6_FLOW:
 +			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
 +			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
 +				info->data = RXH_IP_SRC | RXH_IP_DST |
 +					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
 +			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		case SCTP_V6_FLOW:
 +		case AH_ESP_V6_FLOW:
 +		case IPV6_FLOW:
 +			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
 +				info->data = RXH_IP_SRC | RXH_IP_DST;
 +			break;
 +		}
 +		return 0;
 +	}
 +	case ETHTOOL_GRXRINGS:
 +		info->data = pi->nqsets;
 +		return 0;
 +	}
 +	return -EOPNOTSUPP;
 +}
 +
 +static struct ethtool_ops cxgb_ethtool_ops = {
 +	.get_settings      = get_settings,
 +	.set_settings      = set_settings,
 +	.get_drvinfo       = get_drvinfo,
 +	.get_msglevel      = get_msglevel,
 +	.set_msglevel      = set_msglevel,
 +	.get_ringparam     = get_sge_param,
 +	.set_ringparam     = set_sge_param,
 +	.get_coalesce      = get_coalesce,
 +	.set_coalesce      = set_coalesce,
 +	.get_eeprom_len    = get_eeprom_len,
 +	.get_eeprom        = get_eeprom,
 +	.set_eeprom        = set_eeprom,
 +	.get_pauseparam    = get_pauseparam,
 +	.set_pauseparam    = set_pauseparam,
 +	.get_link          = ethtool_op_get_link,
 +	.get_strings       = get_strings,
 +	.set_phys_id       = identify_port,
 +	.nway_reset        = restart_autoneg,
 +	.get_sset_count    = get_sset_count,
 +	.get_ethtool_stats = get_stats,
 +	.get_regs_len      = get_regs_len,
 +	.get_regs          = get_regs,
 +	.get_wol           = get_wol,
 +	.set_wol           = set_wol,
 +	.get_rxnfc         = get_rxnfc,
 +	.get_rxfh_indir    = get_rss_table,
 +	.set_rxfh_indir    = set_rss_table,
 +	.flash_device      = set_flash,
 +};
 +
 +/*
 + * debugfs support
 + */
 +
 +static int mem_open(struct inode *inode, struct file *file)
 +{
 +	file->private_data = inode->i_private;
 +	return 0;
 +}
 +
 +static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
 +			loff_t *ppos)
 +{
 +	loff_t pos = *ppos;
 +	loff_t avail = file->f_path.dentry->d_inode->i_size;
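 +	/*
 +	 * The low 2 bits of the debugfs private_data pointer encode the
 +	 * memory type (EDC0/EDC1/MC); subtracting them back out recovers
 +	 * the adapter pointer.  See add_debugfs_mem().
 +	 */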
 +	unsigned int mem = (uintptr_t)file->private_data & 3;
 +	struct adapter *adap = file->private_data - mem;
 +
 +	if (pos < 0)
 +		return -EINVAL;
 +	if (pos >= avail)
 +		return 0;
 +	if (count > avail - pos)
 +		count = avail - pos;
 +
 +	while (count) {
 +		size_t len;
 +		int ret, ofst;
 +		__be32 data[16];
 +
 +		if (mem == MEM_MC)
 +			ret = t4_mc_read(adap, pos, data, NULL);
 +		else
 +			ret = t4_edc_read(adap, mem, pos, data, NULL);
 +		if (ret)
 +			return ret;
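 +		/* each read returns the aligned 64-byte chunk containing
 +		 * pos, so copy out only the part the caller asked for
 +		 */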
 +
 +		ofst = pos % sizeof(data);
 +		len = min(count, sizeof(data) - ofst);
 +		if (copy_to_user(buf, (u8 *)data + ofst, len))
 +			return -EFAULT;
 +
 +		buf += len;
 +		pos += len;
 +		count -= len;
 +	}
 +	count = pos - *ppos;
 +	*ppos = pos;
 +	return count;
 +}
 +
 +static const struct file_operations mem_debugfs_fops = {
 +	.owner   = THIS_MODULE,
 +	.open    = mem_open,
 +	.read    = mem_read,
 +	.llseek  = default_llseek,
 +};
 +
 +static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
 +				      unsigned int idx, unsigned int size_mb)
 +{
 +	struct dentry *de;
 +
 +	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
 +				 (void *)adap + idx, &mem_debugfs_fops);
 +	if (de && de->d_inode)
 +		de->d_inode->i_size = size_mb << 20;
 +}
 +
 +static int __devinit setup_debugfs(struct adapter *adap)
 +{
 +	int i;
 +
 +	if (IS_ERR_OR_NULL(adap->debugfs_root))
 +		return -1;
 +
 +	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
 +	if (i & EDRAM0_ENABLE)
 +		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
 +	if (i & EDRAM1_ENABLE)
 +		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
 +	if (i & EXT_MEM_ENABLE)
 +		add_debugfs_mem(adap, "mc", MEM_MC,
 +			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
 +	if (adap->l2t)
 +		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
 +				    &t4_l2t_fops);
 +	return 0;
 +}
 +
 +/*
 + * upper-layer driver support
 + */
 +
 +/*
 + * Allocate an active-open TID and associate it with the supplied data.
 + */
 +int cxgb4_alloc_atid(struct tid_info *t, void *data)
 +{
 +	int atid = -1;
 +
 +	spin_lock_bh(&t->atid_lock);
 +	if (t->afree) {
 +		union aopen_entry *p = t->afree;
 +
 +		atid = p - t->atid_tab;
 +		t->afree = p->next;
 +		p->data = data;
 +		t->atids_in_use++;
 +	}
 +	spin_unlock_bh(&t->atid_lock);
 +	return atid;
 +}
 +EXPORT_SYMBOL(cxgb4_alloc_atid);
 +
 +/*
 + * Release an active-open TID.
 + */
 +void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
 +{
 +	union aopen_entry *p = &t->atid_tab[atid];
 +
 +	spin_lock_bh(&t->atid_lock);
 +	p->next = t->afree;
 +	t->afree = p;
 +	t->atids_in_use--;
 +	spin_unlock_bh(&t->atid_lock);
 +}
 +EXPORT_SYMBOL(cxgb4_free_atid);
 +
 +/*
 + * Allocate a server TID and associate it with the supplied data.
 + */
 +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
 +{
 +	int stid;
 +
 +	spin_lock_bh(&t->stid_lock);
 +	if (family == PF_INET) {
 +		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
 +		if (stid < t->nstids)
 +			__set_bit(stid, t->stid_bmap);
 +		else
 +			stid = -1;
 +	} else {
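 +		/* Non-IPv4 (IPv6) servers occupy four consecutive stid
 +		 * entries, allocated as an aligned order-2 region.
 +		 */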
 +		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
 +		if (stid < 0)
 +			stid = -1;
 +	}
 +	if (stid >= 0) {
 +		t->stid_tab[stid].data = data;
 +		stid += t->stid_base;
 +		t->stids_in_use++;
 +	}
 +	spin_unlock_bh(&t->stid_lock);
 +	return stid;
 +}
 +EXPORT_SYMBOL(cxgb4_alloc_stid);
 +
 +/*
 + * Release a server TID.
 + */
 +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 +{
 +	stid -= t->stid_base;
 +	spin_lock_bh(&t->stid_lock);
 +	if (family == PF_INET)
 +		__clear_bit(stid, t->stid_bmap);
 +	else
 +		bitmap_release_region(t->stid_bmap, stid, 2);
 +	t->stid_tab[stid].data = NULL;
 +	t->stids_in_use--;
 +	spin_unlock_bh(&t->stid_lock);
 +}
 +EXPORT_SYMBOL(cxgb4_free_stid);
 +
 +/*
 + * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 + */
 +static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
 +			   unsigned int tid)
 +{
 +	struct cpl_tid_release *req;
 +
 +	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
 +	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
 +	INIT_TP_WR(req, tid);
 +	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
 +}
 +
 +/*
 + * Queue a TID release request and if necessary schedule a work queue to
 + * process it.
 + */
 +static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
 +				    unsigned int tid)
 +{
 +	void **p = &t->tid_tab[tid];
 +	struct adapter *adap = container_of(t, struct adapter, tids);
 +
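 +	/* The tid_tab slot of a TID being released is reused as the link
 +	 * field of the pending-release list.
 +	 */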
 +	spin_lock_bh(&adap->tid_release_lock);
 +	*p = adap->tid_release_head;
 +	/* Low 2 bits encode the Tx channel number */
 +	adap->tid_release_head = (void **)((uintptr_t)p | chan);
 +	if (!adap->tid_release_task_busy) {
 +		adap->tid_release_task_busy = true;
 +		schedule_work(&adap->tid_release_task);
 +	}
 +	spin_unlock_bh(&adap->tid_release_lock);
 +}
 +
 +/*
 + * Process the list of pending TID release requests.
 + */
 +static void process_tid_release_list(struct work_struct *work)
 +{
 +	struct sk_buff *skb;
 +	struct adapter *adap;
 +
 +	adap = container_of(work, struct adapter, tid_release_task);
 +
 +	spin_lock_bh(&adap->tid_release_lock);
 +	while (adap->tid_release_head) {
 +		void **p = adap->tid_release_head;
 +		unsigned int chan = (uintptr_t)p & 3;
 +		p = (void *)p - chan;
 +
 +		adap->tid_release_head = *p;
 +		*p = NULL;
 +		spin_unlock_bh(&adap->tid_release_lock);
 +
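 +		/* We only get here because the atomic allocation in
 +		 * cxgb4_remove_tid() failed, so sleep until an skb is
 +		 * available rather than give up.
 +		 */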
 +		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
 +					 GFP_KERNEL)))
 +			schedule_timeout_uninterruptible(1);
 +
 +		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
 +		t4_ofld_send(adap, skb);
 +		spin_lock_bh(&adap->tid_release_lock);
 +	}
 +	adap->tid_release_task_busy = false;
 +	spin_unlock_bh(&adap->tid_release_lock);
 +}
 +
 +/*
 + * Release a TID and inform HW.  If we are unable to allocate the release
 + * message we defer to a work queue.
 + */
 +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 +{
 +	void *old;
 +	struct sk_buff *skb;
 +	struct adapter *adap = container_of(t, struct adapter, tids);
 +
 +	old = t->tid_tab[tid];
 +	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 +	if (likely(skb)) {
 +		t->tid_tab[tid] = NULL;
 +		mk_tid_release(skb, chan, tid);
 +		t4_ofld_send(adap, skb);
 +	} else
 +		cxgb4_queue_tid_release(t, chan, tid);
 +	if (old)
 +		atomic_dec(&t->tids_in_use);
 +}
 +EXPORT_SYMBOL(cxgb4_remove_tid);
 +
 +/*
 + * Allocate and initialize the TID tables.  Returns 0 on success.
 + */
 +static int tid_init(struct tid_info *t)
 +{
 +	size_t size;
 +	unsigned int natids = t->natids;
 +
 +	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
 +	       t->nstids * sizeof(*t->stid_tab) +
 +	       BITS_TO_LONGS(t->nstids) * sizeof(long);
 +	t->tid_tab = t4_alloc_mem(size);
 +	if (!t->tid_tab)
 +		return -ENOMEM;
 +
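 +	/* Carve the single allocation into the TID, ATID, and server TID
 +	 * tables, with the server TID bitmap at the end.
 +	 */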
 +	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
 +	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
 +	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
 +	spin_lock_init(&t->stid_lock);
 +	spin_lock_init(&t->atid_lock);
 +
 +	t->stids_in_use = 0;
 +	t->afree = NULL;
 +	t->atids_in_use = 0;
 +	atomic_set(&t->tids_in_use, 0);
 +
 +	/* Set up the free list for atid_tab and clear the stid bitmap. */
 +	if (natids) {
 +		while (--natids)
 +			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
 +		t->afree = t->atid_tab;
 +	}
 +	bitmap_zero(t->stid_bmap, t->nstids);
 +	return 0;
 +}
 +
 +/**
 + *	cxgb4_create_server - create an IP server
 + *	@dev: the device
 + *	@stid: the server TID
 + *	@sip: local IP address to bind server to
 + *	@sport: the server's TCP port
 + *	@queue: queue to direct messages from this server to
 + *
 + *	Create an IP server for the given port and address.
 + *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 + */
 +int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 +			__be32 sip, __be16 sport, unsigned int queue)
 +{
 +	unsigned int chan;
 +	struct sk_buff *skb;
 +	struct adapter *adap;
 +	struct cpl_pass_open_req *req;
 +
 +	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 +	if (!skb)
 +		return -ENOMEM;
 +
 +	adap = netdev2adap(dev);
 +	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
 +	INIT_TP_WR(req, 0);
 +	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
 +	req->local_port = sport;
 +	req->peer_port = htons(0);
 +	req->local_ip = sip;
 +	req->peer_ip = htonl(0);
 +	chan = rxq_to_chan(&adap->sge, queue);
 +	req->opt0 = cpu_to_be64(TX_CHAN(chan));
 +	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
 +				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
 +	return t4_mgmt_tx(adap, skb);
 +}
 +EXPORT_SYMBOL(cxgb4_create_server);
 +
 +/**
 + *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 + *	@mtus: the HW MTU table
 + *	@mtu: the target MTU
 + *	@idx: index of selected entry in the MTU table
 + *
 + *	Returns the index and the value in the HW MTU table that is closest to
 + *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 + *	table, in which case that smallest available value is selected.
 + */
 +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 +			    unsigned int *idx)
 +{
 +	unsigned int i = 0;
 +
 +	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
 +		++i;
 +	if (idx)
 +		*idx = i;
 +	return mtus[i];
 +}
 +EXPORT_SYMBOL(cxgb4_best_mtu);
 +
 +/**
 + *	cxgb4_port_chan - get the HW channel of a port
 + *	@dev: the net device for the port
 + *
 + *	Return the HW Tx channel of the given port.
 + */
 +unsigned int cxgb4_port_chan(const struct net_device *dev)
 +{
 +	return netdev2pinfo(dev)->tx_chan;
 +}
 +EXPORT_SYMBOL(cxgb4_port_chan);
 +
 +/**
 + *	cxgb4_port_viid - get the VI id of a port
 + *	@dev: the net device for the port
 + *
 + *	Return the VI id of the given port.
 + */
 +unsigned int cxgb4_port_viid(const struct net_device *dev)
 +{
 +	return netdev2pinfo(dev)->viid;
 +}
 +EXPORT_SYMBOL(cxgb4_port_viid);
 +
 +/**
 + *	cxgb4_port_idx - get the index of a port
 + *	@dev: the net device for the port
 + *
 + *	Return the index of the given port.
 + */
 +unsigned int cxgb4_port_idx(const struct net_device *dev)
 +{
 +	return netdev2pinfo(dev)->port_id;
 +}
 +EXPORT_SYMBOL(cxgb4_port_idx);
 +
 +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
 +			 struct tp_tcp_stats *v6)
 +{
 +	struct adapter *adap = pci_get_drvdata(pdev);
 +
 +	spin_lock(&adap->stats_lock);
 +	t4_tp_get_tcp_stats(adap, v4, v6);
 +	spin_unlock(&adap->stats_lock);
 +}
 +EXPORT_SYMBOL(cxgb4_get_tcp_stats);
 +
 +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
 +		      const unsigned int *pgsz_order)
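 +	/*
 +	 * pci_enable_msix() returns 0 on success, a negative errno on
 +	 * failure, or the number of vectors actually available if fewer
 +	 * than requested; keep retrying with that count while it still
 +	 * covers our minimum.
 +	 */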
 +{
 +	struct adapter *adap = netdev2adap(dev);
 +
 +	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
 +	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
 +		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
 +		     HPZ3(pgsz_order[3]));
 +}
 +EXPORT_SYMBOL(cxgb4_iscsi_init);
 +
 +static struct pci_driver cxgb4_driver;
 +
 +static void check_neigh_update(struct neighbour *neigh)
 +{
 +	const struct device *parent;
 +	const struct net_device *netdev = neigh->dev;
 +
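 +	/* L2T entries belong to the underlying physical device, so map
 +	 * VLAN devices to their real device first.
 +	 */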
 +	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 +		netdev = vlan_dev_real_dev(netdev);
 +	parent = netdev->dev.parent;
 +	if (parent && parent->driver == &cxgb4_driver.driver)
 +		t4_l2t_update(dev_get_drvdata(parent), neigh);
 +}
 +
 +static int netevent_cb(struct notifier_block *nb, unsigned long event,
 +		       void *data)
 +{
 +	switch (event) {
 +	case NETEVENT_NEIGH_UPDATE:
 +		check_neigh_update(data);
 +		break;
 +	case NETEVENT_REDIRECT:
 +	default:
 +		break;
 +	}
 +	return 0;
 +}
 +
 +static bool netevent_registered;
 +static struct notifier_block cxgb4_netevent_nb = {
 +	.notifier_call = netevent_cb
 +};
 +
 +static void uld_attach(struct adapter *adap, unsigned int uld)
 +{
 +	void *handle;
 +	struct cxgb4_lld_info lli;
 +
 +	lli.pdev = adap->pdev;
 +	lli.l2t = adap->l2t;
 +	lli.tids = &adap->tids;
 +	lli.ports = adap->port;
 +	lli.vr = &adap->vres;
 +	lli.mtus = adap->params.mtus;
 +	if (uld == CXGB4_ULD_RDMA) {
 +		lli.rxq_ids = adap->sge.rdma_rxq;
 +		lli.nrxq = adap->sge.rdmaqs;
 +	} else if (uld == CXGB4_ULD_ISCSI) {
 +		lli.rxq_ids = adap->sge.ofld_rxq;
 +		lli.nrxq = adap->sge.ofldqsets;
 +	}
 +	lli.ntxq = adap->sge.ofldqsets;
 +	lli.nchan = adap->params.nports;
 +	lli.nports = adap->params.nports;
 +	lli.wr_cred = adap->params.ofldq_wr_cred;
 +	lli.adapter_type = adap->params.rev;
 +	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
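 +	/* The per-PF queues-per-page fields are 4 bits each; shift our
 +	 * function's field down to bit 0 before extracting it.
 +	 */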
 +	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
 +			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
 +			(adap->fn * 4));
 +	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
 +			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
 +			(adap->fn * 4));
 +	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
 +	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
 +	lli.fw_vers = adap->params.fw_vers;
 +
 +	handle = ulds[uld].add(&lli);
 +	if (IS_ERR(handle)) {
 +		dev_warn(adap->pdev_dev,
 +			 "could not attach to the %s driver, error %ld\n",
 +			 uld_str[uld], PTR_ERR(handle));
 +		return;
 +	}
 +
 +	adap->uld_handle[uld] = handle;
 +
 +	if (!netevent_registered) {
 +		register_netevent_notifier(&cxgb4_netevent_nb);
 +		netevent_registered = true;
 +	}
 +
 +	if (adap->flags & FULL_INIT_DONE)
 +		ulds[uld].state_change(handle, CXGB4_STATE_UP);
 +}
 +
 +static void attach_ulds(struct adapter *adap)
 +{
 +	unsigned int i;
 +
 +	mutex_lock(&uld_mutex);
 +	list_add_tail(&adap->list_node, &adapter_list);
 +	for (i = 0; i < CXGB4_ULD_MAX; i++)
 +		if (ulds[i].add)
 +			uld_attach(adap, i);
 +	mutex_unlock(&uld_mutex);
 +}
 +
 +static void detach_ulds(struct adapter *adap)
 +{
 +	unsigned int i;
 +
 +	mutex_lock(&uld_mutex);
 +	list_del(&adap->list_node);
 +	for (i = 0; i < CXGB4_ULD_MAX; i++)
 +		if (adap->uld_handle[i]) {
 +			ulds[i].state_change(adap->uld_handle[i],
 +					     CXGB4_STATE_DETACH);
 +			adap->uld_handle[i] = NULL;
 +		}
 +	if (netevent_registered && list_empty(&adapter_list)) {
 +		unregister_netevent_notifier(&cxgb4_netevent_nb);
 +		netevent_registered = false;
 +	}
 +	mutex_unlock(&uld_mutex);
 +}
 +
 +static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
 +{
 +	unsigned int i;
 +
 +	mutex_lock(&uld_mutex);
 +	for (i = 0; i < CXGB4_ULD_MAX; i++)
 +		if (adap->uld_handle[i])
 +			ulds[i].state_change(adap->uld_handle[i], new_state);
 +	mutex_unlock(&uld_mutex);
 +}
 +
 +/**
 + *	cxgb4_register_uld - register an upper-layer driver
 + *	@type: the ULD type
 + *	@p: the ULD methods
 + *
 + *	Registers an upper-layer driver with this driver and notifies the ULD
 + *	about any presently available devices that support its type.  Returns
 + *	%-EBUSY if a ULD of the same type is already registered.
 + */
 +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
 +{
 +	int ret = 0;
 +	struct adapter *adap;
 +
 +	if (type >= CXGB4_ULD_MAX)
 +		return -EINVAL;
 +	mutex_lock(&uld_mutex);
 +	if (ulds[type].add) {
 +		ret = -EBUSY;
 +		goto out;
 +	}
 +	ulds[type] = *p;
 +	list_for_each_entry(adap, &adapter_list, list_node)
 +		uld_attach(adap, type);
 +out:	mutex_unlock(&uld_mutex);
 +	return ret;
 +}
 +EXPORT_SYMBOL(cxgb4_register_uld);
 +
 +/**
 + *	cxgb4_unregister_uld - unregister an upper-layer driver
 + *	@type: the ULD type
 + *
 + *	Unregisters an existing upper-layer driver.
 + */
 +int cxgb4_unregister_uld(enum cxgb4_uld type)
 +{
 +	struct adapter *adap;
 +
 +	if (type >= CXGB4_ULD_MAX)
 +		return -EINVAL;
 +	mutex_lock(&uld_mutex);
 +	list_for_each_entry(adap, &adapter_list, list_node)
 +		adap->uld_handle[type] = NULL;
 +	ulds[type].add = NULL;
 +	mutex_unlock(&uld_mutex);
 +	return 0;
 +}
 +EXPORT_SYMBOL(cxgb4_unregister_uld);
 +
 +/**
 + *	cxgb_up - enable the adapter
 + *	@adap: adapter being enabled
 + *
 + *	Called when the first port is enabled, this function performs the
 + *	actions necessary to make an adapter operational, such as completing
 + *	the initialization of HW modules, and enabling interrupts.
 + *
 + *	Must be called with the rtnl lock held.
 + */
 +static int cxgb_up(struct adapter *adap)
 +{
 +	int err;
 +
 +	err = setup_sge_queues(adap);
 +	if (err)
 +		goto out;
 +	err = setup_rss(adap);
 +	if (err)
 +		goto freeq;
 +
 +	if (adap->flags & USING_MSIX) {
 +		name_msix_vecs(adap);
 +		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
 +				  adap->msix_info[0].desc, adap);
 +		if (err)
 +			goto irq_err;
 +
 +		err = request_msix_queue_irqs(adap);
 +		if (err) {
 +			free_irq(adap->msix_info[0].vec, adap);
 +			goto irq_err;
 +		}
 +	} else {
 +		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
 +				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 +				  adap->port[0]->name, adap);
 +		if (err)
 +			goto irq_err;
 +	}
 +	enable_rx(adap);
 +	t4_sge_start(adap);
 +	t4_intr_enable(adap);
 +	adap->flags |= FULL_INIT_DONE;
 +	notify_ulds(adap, CXGB4_STATE_UP);
 + out:
 +	return err;
 + irq_err:
 +	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 + freeq:
 +	t4_free_sge_resources(adap);
 +	goto out;
 +}
 +
 +static void cxgb_down(struct adapter *adapter)
 +{
 +	t4_intr_disable(adapter);
 +	cancel_work_sync(&adapter->tid_release_task);
 +	adapter->tid_release_task_busy = false;
 +	adapter->tid_release_head = NULL;
 +
 +	if (adapter->flags & USING_MSIX) {
 +		free_msix_queue_irqs(adapter);
 +		free_irq(adapter->msix_info[0].vec, adapter);
 +	} else
 +		free_irq(adapter->pdev->irq, adapter);
 +	quiesce_rx(adapter);
 +	t4_sge_stop(adapter);
 +	t4_free_sge_resources(adapter);
 +	adapter->flags &= ~FULL_INIT_DONE;
 +}
 +
 +/*
 + * net_device operations
 + */
 +static int cxgb_open(struct net_device *dev)
 +{
 +	int err;
 +	struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adapter = pi->adapter;
 +
 +	netif_carrier_off(dev);
 +
 +	if (!(adapter->flags & FULL_INIT_DONE)) {
 +		err = cxgb_up(adapter);
 +		if (err < 0)
 +			return err;
 +	}
 +
 +	err = link_start(dev);
 +	if (!err)
 +		netif_tx_start_all_queues(dev);
 +	return err;
 +}
 +
 +static int cxgb_close(struct net_device *dev)
 +{
 +	struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adapter = pi->adapter;
 +
 +	netif_tx_stop_all_queues(dev);
 +	netif_carrier_off(dev);
 +	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
 +}
 +
 +static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
 +						struct rtnl_link_stats64 *ns)
 +{
 +	struct port_stats stats;
 +	struct port_info *p = netdev_priv(dev);
 +	struct adapter *adapter = p->adapter;
 +
 +	spin_lock(&adapter->stats_lock);
 +	t4_get_port_stats(adapter, p->tx_chan, &stats);
 +	spin_unlock(&adapter->stats_lock);
 +
 +	ns->tx_bytes   = stats.tx_octets;
 +	ns->tx_packets = stats.tx_frames;
 +	ns->rx_bytes   = stats.rx_octets;
 +	ns->rx_packets = stats.rx_frames;
 +	ns->multicast  = stats.rx_mcast_frames;
 +
 +	/* detailed rx_errors */
 +	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
 +			       stats.rx_runt;
 +	ns->rx_over_errors   = 0;
 +	ns->rx_crc_errors    = stats.rx_fcs_err;
 +	ns->rx_frame_errors  = stats.rx_symbol_err;
 +	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
 +			       stats.rx_ovflow2 + stats.rx_ovflow3 +
 +			       stats.rx_trunc0 + stats.rx_trunc1 +
 +			       stats.rx_trunc2 + stats.rx_trunc3;
 +	ns->rx_missed_errors = 0;
 +
 +	/* detailed tx_errors */
 +	ns->tx_aborted_errors   = 0;
 +	ns->tx_carrier_errors   = 0;
 +	ns->tx_fifo_errors      = 0;
 +	ns->tx_heartbeat_errors = 0;
 +	ns->tx_window_errors    = 0;
 +
 +	ns->tx_errors = stats.tx_error_frames;
 +	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
 +		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
 +	return ns;
 +}
 +
 +static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 +{
 +	unsigned int mbox;
 +	int ret = 0, prtad, devad;
 +	struct port_info *pi = netdev_priv(dev);
 +	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
 +
 +	switch (cmd) {
 +	case SIOCGMIIPHY:
 +		if (pi->mdio_addr < 0)
 +			return -EOPNOTSUPP;
 +		data->phy_id = pi->mdio_addr;
 +		break;
 +	case SIOCGMIIREG:
 +	case SIOCSMIIREG:
 +		if (mdio_phy_id_is_c45(data->phy_id)) {
 +			prtad = mdio_phy_id_prtad(data->phy_id);
 +			devad = mdio_phy_id_devad(data->phy_id);
 +		} else if (data->phy_id < 32) {
 +			prtad = data->phy_id;
 +			devad = 0;
 +			data->reg_num &= 0x1f;
 +		} else
 +			return -EINVAL;
 +
 +		mbox = pi->adapter->fn;
 +		if (cmd == SIOCGMIIREG)
 +			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
 +					 data->reg_num, &data->val_out);
 +		else
 +			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
 +					 data->reg_num, data->val_in);
 +		break;
 +	default:
 +		return -EOPNOTSUPP;
 +	}
 +	return ret;
 +}
 +
 +static void cxgb_set_rxmode(struct net_device *dev)
 +{
 +	/* unfortunately we can't return errors to the stack */
 +	set_rxmode(dev, -1, false);
 +}
 +
 +static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +	int ret;
 +	struct port_info *pi = netdev_priv(dev);
 +
 +	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
 +		return -EINVAL;
 +	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
 +			    -1, -1, -1, true);
 +	if (!ret)
 +		dev->mtu = new_mtu;
 +	return ret;
 +}
 +
 +static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 +{
 +	int ret;
 +	struct sockaddr *addr = p;
 +	struct port_info *pi = netdev_priv(dev);
 +
 +	if (!is_valid_ether_addr(addr->sa_data))
 +		return -EINVAL;
 +
 +	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
 +			    pi->xact_addr_filt, addr->sa_data, true, true);
 +	if (ret < 0)
 +		return ret;
 +
 +	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +	pi->xact_addr_filt = ret;
 +	return 0;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void cxgb_netpoll(struct net_device *dev)
 +{
 +	struct port_info *pi = netdev_priv(dev);
 +	struct adapter *adap = pi->adapter;
 +
 +	if (adap->flags & USING_MSIX) {
 +		int i;
 +		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
 +
 +		for (i = pi->nqsets; i; i--, rx++)
 +			t4_sge_intr_msix(0, &rx->rspq);
 +	} else
 +		t4_intr_handler(adap)(0, adap);
 +}
 +#endif
 +
 +static const struct net_device_ops cxgb4_netdev_ops = {
 +	.ndo_open             = cxgb_open,
 +	.ndo_stop             = cxgb_close,
 +	.ndo_start_xmit       = t4_eth_xmit,
 +	.ndo_get_stats64      = cxgb_get_stats,
 +	.ndo_set_rx_mode      = cxgb_set_rxmode,
 +	.ndo_set_mac_address  = cxgb_set_mac_addr,
 +	.ndo_set_features     = cxgb_set_features,
 +	.ndo_validate_addr    = eth_validate_addr,
 +	.ndo_do_ioctl         = cxgb_ioctl,
 +	.ndo_change_mtu       = cxgb_change_mtu,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +	.ndo_poll_controller  = cxgb_netpoll,
 +#endif
 +};
 +
 +void t4_fatal_err(struct adapter *adap)
 +{
 +	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
 +	t4_intr_disable(adap);
 +	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 +}
 +
 +static void setup_memwin(struct adapter *adap)
 +{
 +	u32 bar0;
 +
 +	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
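 +	/* Each memory window is programmed with its BAR0-relative base
 +	 * address and its size encoded as log2 of the aperture in KB.
 +	 */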
 +	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
 +		     (bar0 + MEMWIN0_BASE) | BIR(0) |
 +		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
 +	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
 +		     (bar0 + MEMWIN1_BASE) | BIR(0) |
 +		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
 +	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
 +		     (bar0 + MEMWIN2_BASE) | BIR(0) |
 +		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
 +	if (adap->vres.ocq.size) {
 +		unsigned int start, sz_kb;
 +
 +		start = pci_resource_start(adap->pdev, 2) +
 +			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
 +		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
 +		t4_write_reg(adap,
 +			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
 +			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
 +		t4_write_reg(adap,
 +			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
 +			     adap->vres.ocq.start);
 +		t4_read_reg(adap,
 +			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
 +	}
 +}
 +
 +static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
 +{
 +	u32 v;
 +	int ret;
 +
 +	/* get device capabilities */
 +	memset(c, 0, sizeof(*c));
 +	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 +			       FW_CMD_REQUEST | FW_CMD_READ);
 +	c->retval_len16 = htonl(FW_LEN16(*c));
 +	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
 +	if (ret < 0)
 +		return ret;
 +
 +	/* select capabilities we'll be using */
 +	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
 +		if (!vf_acls)
 +			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
 +		else
 +			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
 +	} else if (vf_acls) {
 +		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
 +		return -EINVAL;	/* ret is >= 0 here; this is an error path */
 +	}
 +	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 +			       FW_CMD_REQUEST | FW_CMD_WRITE);
 +	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
 +	if (ret < 0)
 +		return ret;
 +
 +	ret = t4_config_glbl_rss(adap, adap->fn,
 +				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 +				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
 +				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
 +	if (ret < 0)
 +		return ret;
 +
 +	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
 +			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
 +	if (ret < 0)
 +		return ret;
 +
 +	t4_sge_init(adap);
 +
 +	/* tweak some settings */
 +	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
 +	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
 +	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
 +	v = t4_read_reg(adap, TP_PIO_DATA);
 +	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
 +
 +	/* get basic stuff going */
 +	return t4_early_init(adap, adap->fn);
 +}
 +
 +/*
 + * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 + */
 +#define MAX_ATIDS 8192U
 +
 +/*
 + * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 + */
 +static int adap_init0(struct adapter *adap)
 +{
 +	int ret;
 +	u32 v, port_vec;
 +	enum dev_state state;
 +	u32 params[7], val[7];
 +	struct fw_caps_config_cmd c;
 +
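 +	/* A bad or stale flashed firmware image is not necessarily fatal:
 +	 * attempt an upgrade and re-check before giving up.
 +	 */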
 +	ret = t4_check_fw_version(adap);
 +	if (ret == -EINVAL || ret > 0) {
 +		if (upgrade_fw(adap) >= 0)             /* recache FW version */
 +			ret = t4_check_fw_version(adap);
 +	}
 +	if (ret < 0)
 +		return ret;
 +
 +	/* contact FW, request master */
 +	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
 +	if (ret < 0) {
 +		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
 +			ret);
 +		return ret;
 +	}
 +
 +	/* reset device */
 +	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
 +	if (ret < 0)
 +		goto bye;
 +
 +	for (v = 0; v < SGE_NTIMERS - 1; v++)
 +		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
 +	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
 +	adap->sge.counter_val[0] = 1;
 +	for (v = 1; v < SGE_NCOUNTERS; v++)
 +		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
 +					       THRESHOLD_3_MASK);
 +#define FW_PARAM_DEV(param) \
 +	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
 +	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
 +
 +	params[0] = FW_PARAM_DEV(CCLK);
 +	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
 +	if (ret < 0)
 +		goto bye;
 +	adap->params.vpd.cclk = val[0];
 +
 +	ret = adap_init1(adap, &c);
 +	if (ret < 0)
 +		goto bye;
 +
 +#define FW_PARAM_PFVF(param) \
 +	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
 +	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
 +	 FW_PARAMS_PARAM_Y(adap->fn))
 +
 +	params[0] = FW_PARAM_DEV(PORTVEC);
 +	params[1] = FW_PARAM_PFVF(L2T_START);
 +	params[2] = FW_PARAM_PFVF(L2T_END);
 +	params[3] = FW_PARAM_PFVF(FILTER_START);
 +	params[4] = FW_PARAM_PFVF(FILTER_END);
 +	params[5] = FW_PARAM_PFVF(IQFLINT_START);
 +	params[6] = FW_PARAM_PFVF(EQ_START);
 +	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
 +	if (ret < 0)
 +		goto bye;
 +	port_vec = val[0];
 +	adap->tids.ftid_base = val[3];
 +	adap->tids.nftids = val[4] - val[3] + 1;
 +	adap->sge.ingr_start = val[5];
 +	adap->sge.egr_start = val[6];
 +
 +	if (c.ofldcaps) {
 +		/* query offload-related parameters */
 +		params[0] = FW_PARAM_DEV(NTID);
 +		params[1] = FW_PARAM_PFVF(SERVER_START);
 +		params[2] = FW_PARAM_PFVF(SERVER_END);
 +		params[3] = FW_PARAM_PFVF(TDDP_START);
 +		params[4] = FW_PARAM_PFVF(TDDP_END);
 +		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
 +		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
 +				      val);
 +		if (ret < 0)
 +			goto bye;
 +		adap->tids.ntids = val[0];
 +		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
 +		adap->tids.stid_base = val[1];
 +		adap->tids.nstids = val[2] - val[1] + 1;
 +		adap->vres.ddp.start = val[3];
 +		adap->vres.ddp.size = val[4] - val[3] + 1;
 +		adap->params.ofldq_wr_cred = val[5];
 +		adap->params.offload = 1;
 +	}
 +	if (c.rdmacaps) {
 +		params[0] = FW_PARAM_PFVF(STAG_START);
 +		params[1] = FW_PARAM_PFVF(STAG_END);
 +		params[2] = FW_PARAM_PFVF(RQ_START);
 +		params[3] = FW_PARAM_PFVF(RQ_END);
 +		params[4] = FW_PARAM_PFVF(PBL_START);
 +		params[5] = FW_PARAM_PFVF(PBL_END);
 +		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
 +				      val);
 +		if (ret < 0)
 +			goto bye;
 +		adap->vres.stag.start = val[0];
 +		adap->vres.stag.size = val[1] - val[0] + 1;
 +		adap->vres.rq.start = val[2];
 +		adap->vres.rq.size = val[3] - val[2] + 1;
 +		adap->vres.pbl.start = val[4];
 +		adap->vres.pbl.size = val[5] - val[4] + 1;
 +
 +		params[0] = FW_PARAM_PFVF(SQRQ_START);
 +		params[1] = FW_PARAM_PFVF(SQRQ_END);
 +		params[2] = FW_PARAM_PFVF(CQ_START);
 +		params[3] = FW_PARAM_PFVF(CQ_END);
 +		params[4] = FW_PARAM_PFVF(OCQ_START);
 +		params[5] = FW_PARAM_PFVF(OCQ_END);
 +		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
 +				      val);
 +		if (ret < 0)
 +			goto bye;
 +		adap->vres.qp.start = val[0];
 +		adap->vres.qp.size = val[1] - val[0] + 1;
 +		adap->vres.cq.start = val[2];
 +		adap->vres.cq.size = val[3] - val[2] + 1;
 +		adap->vres.ocq.start = val[4];
 +		adap->vres.ocq.size = val[5] - val[4] + 1;
 +	}
 +	if (c.iscsicaps) {
 +		params[0] = FW_PARAM_PFVF(ISCSI_START);
 +		params[1] = FW_PARAM_PFVF(ISCSI_END);
 +		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
 +				      val);
 +		if (ret < 0)
 +			goto bye;
 +		adap->vres.iscsi.start = val[0];
 +		adap->vres.iscsi.size = val[1] - val[0] + 1;
 +	}
 +#undef FW_PARAM_PFVF
 +#undef FW_PARAM_DEV
 +
 +	adap->params.nports = hweight32(port_vec);
 +	adap->params.portvec = port_vec;
 +	adap->flags |= FW_OK;
 +
 +	/* These are finalized by FW initialization, load their values now */
 +	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
 +	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
 +	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
 +	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
 +		     adap->params.b_wnd);
 +
 +#ifdef CONFIG_PCI_IOV
 +	/*
 +	 * Provision resource limits for Virtual Functions.  We currently
 +	 * grant them all the same static resource limits except for the Port
 +	 * Access Rights Mask which we're assigning based on the PF.  All of
 +	 * the static provisioning stuff for both the PF and VF really needs
 +	 * to be managed in a persistent manner for each device which the
 +	 * firmware controls.
 +	 */
 +	{
 +		int pf, vf;
 +
 +		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
 +			if (num_vf[pf] <= 0)
 +				continue;
 +
 +			/* VF numbering starts at 1! */
 +			for (vf = 1; vf <= num_vf[pf]; vf++) {
 +				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
 +						  VFRES_NEQ, VFRES_NETHCTRL,
 +						  VFRES_NIQFLINT, VFRES_NIQ,
 +						  VFRES_TC, VFRES_NVI,
 +						  FW_PFVF_CMD_CMASK_MASK,
 +						  pfvfres_pmask(adap, pf, vf),
 +						  VFRES_NEXACTF,
 +						  VFRES_R_CAPS, VFRES_WX_CAPS);
 +				if (ret < 0)
 +					dev_warn(adap->pdev_dev, "failed to "
 +						 "provision pf/vf=%d/%d; "
 +						 "err=%d\n", pf, vf, ret);
 +			}
 +		}
 +	}
 +#endif
 +
 +	setup_memwin(adap);
 +	return 0;
 +
 +	/*
 +	 * If a command timed out or failed with EIO, the FW is either not
 +	 * operating within its spec or something catastrophic happened to
 +	 * the HW/FW; in either case stop issuing commands.
 +	 */
 +bye:	if (ret != -ETIMEDOUT && ret != -EIO)
 +		t4_fw_bye(adap, adap->fn);
 +	return ret;
 +}
 +
 +/* EEH callbacks */
 +
 +static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
 +					 pci_channel_state_t state)
 +{
 +	int i;
 +	struct adapter *adap = pci_get_drvdata(pdev);
 +
 +	if (!adap)
 +		goto out;
 +
 +	rtnl_lock();
 +	adap->flags &= ~FW_OK;
 +	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +
 +		netif_device_detach(dev);
 +		netif_carrier_off(dev);
 +	}
 +	if (adap->flags & FULL_INIT_DONE)
 +		cxgb_down(adap);
 +	rtnl_unlock();
 +	pci_disable_device(pdev);
 +out:	return state == pci_channel_io_perm_failure ?
 +		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 +{
 +	int i, ret;
 +	struct fw_caps_config_cmd c;
 +	struct adapter *adap = pci_get_drvdata(pdev);
 +
 +	if (!adap) {
 +		pci_restore_state(pdev);
 +		pci_save_state(pdev);
 +		return PCI_ERS_RESULT_RECOVERED;
 +	}
 +
 +	if (pci_enable_device(pdev)) {
 +		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	}
 +
 +	pci_set_master(pdev);
 +	pci_restore_state(pdev);
 +	pci_save_state(pdev);
 +	pci_cleanup_aer_uncorrect_error_status(pdev);
 +
 +	if (t4_wait_dev_ready(adap) < 0)
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	adap->flags |= FW_OK;
 +	if (adap_init1(adap, &c))
 +		return PCI_ERS_RESULT_DISCONNECT;
 +
 +	for_each_port(adap, i) {
 +		struct port_info *p = adap2pinfo(adap, i);
 +
 +		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
 +				  NULL, NULL);
 +		if (ret < 0)
 +			return PCI_ERS_RESULT_DISCONNECT;
 +		p->viid = ret;
 +		p->xact_addr_filt = -1;
 +	}
 +
 +	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
 +		     adap->params.b_wnd);
 +	setup_memwin(adap);
 +	if (cxgb_up(adap))
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	return PCI_ERS_RESULT_RECOVERED;
 +}
 +
 +static void eeh_resume(struct pci_dev *pdev)
 +{
 +	int i;
 +	struct adapter *adap = pci_get_drvdata(pdev);
 +
 +	if (!adap)
 +		return;
 +
 +	rtnl_lock();
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +
 +		if (netif_running(dev)) {
 +			link_start(dev);
 +			cxgb_set_rxmode(dev);
 +		}
 +		netif_device_attach(dev);
 +	}
 +	rtnl_unlock();
 +}
 +
 +static struct pci_error_handlers cxgb4_eeh = {
 +	.error_detected = eeh_err_detected,
 +	.slot_reset     = eeh_slot_reset,
 +	.resume         = eeh_resume,
 +};
 +
 +static inline bool is_10g_port(const struct link_config *lc)
 +{
 +	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
 +}
 +
 +static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
 +			     unsigned int size, unsigned int iqe_size)
 +{
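 +	/* a pkt_cnt_idx outside the valid range disables packet-count
 +	 * based interrupt moderation for this queue
 +	 */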
 +	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
 +			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
 +	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
 +	q->iqe_len = iqe_size;
 +	q->size = size;
 +}
 +
 +/*
 + * Perform default configuration of DMA queues depending on the number and type
 + * of ports we found and the number of available CPUs.  Most settings can be
 + * modified by the admin prior to actual use.
 + */
 +static void __devinit cfg_queues(struct adapter *adap)
 +{
 +	struct sge *s = &adap->sge;
 +	int i, q10g = 0, n10g = 0, qidx = 0;
 +
 +	for_each_port(adap, i)
 +		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
 +
 +	/*
 +	 * We default to one queue set per non-10G port and up to one queue
 +	 * set per CPU core for each 10G port.
 +	 */
 +	if (n10g)
 +		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
 +	if (q10g > num_online_cpus())
 +		q10g = num_online_cpus();
 +
 +	for_each_port(adap, i) {
 +		struct port_info *pi = adap2pinfo(adap, i);
 +
 +		pi->first_qset = qidx;
 +		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
 +		qidx += pi->nqsets;
 +	}
 +
 +	s->ethqsets = qidx;
 +	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
 +
 +	if (is_offload(adap)) {
 +		/*
 +		 * For offload we use one queue per channel if all ports are
 +		 * at most 1G; otherwise we divide the available queues amongst
 +		 * the channels, capped by the number of online CPUs.
 +		 */
 +		if (n10g) {
 +			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
 +				  num_online_cpus());
 +			s->ofldqsets = roundup(i, adap->params.nports);
 +		} else
 +			s->ofldqsets = adap->params.nports;
 +		/* For RDMA one Rx queue per channel suffices */
 +		s->rdmaqs = adap->params.nports;
 +	}
 +
 +	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
 +		struct sge_eth_rxq *r = &s->ethrxq[i];
 +
 +		init_rspq(&r->rspq, 0, 0, 1024, 64);
 +		r->fl.size = 72;
 +	}
 +
 +	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
 +		s->ethtxq[i].q.size = 1024;
 +
 +	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
 +		s->ctrlq[i].q.size = 512;
 +
 +	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
 +		s->ofldtxq[i].q.size = 1024;
 +
 +	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
 +		struct sge_ofld_rxq *r = &s->ofldrxq[i];
 +
 +		init_rspq(&r->rspq, 0, 0, 1024, 64);
 +		r->rspq.uld = CXGB4_ULD_ISCSI;
 +		r->fl.size = 72;
 +	}
 +
 +	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
 +		struct sge_ofld_rxq *r = &s->rdmarxq[i];
 +
 +		init_rspq(&r->rspq, 0, 0, 511, 64);
 +		r->rspq.uld = CXGB4_ULD_RDMA;
 +		r->fl.size = 72;
 +	}
 +
 +	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
 +	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
 +}
 +
 +/*
 + * Reduce the number of Ethernet queue sets across all ports to at most n.
 + * The caller guarantees that n is at least the number of ports, so every
 + * port keeps at least one queue set.
 + */
 +static void __devinit reduce_ethqs(struct adapter *adap, int n)
 +{
 +	int i;
 +	struct port_info *pi;
 +
 +	while (n < adap->sge.ethqsets)
 +		for_each_port(adap, i) {
 +			pi = adap2pinfo(adap, i);
 +			if (pi->nqsets > 1) {
 +				pi->nqsets--;
 +				adap->sge.ethqsets--;
 +				if (adap->sge.ethqsets <= n)
 +					break;
 +			}
 +		}
 +
 +	n = 0;
 +	for_each_port(adap, i) {
 +		pi = adap2pinfo(adap, i);
 +		pi->first_qset = n;
 +		n += pi->nqsets;
 +	}
 +}
 +
 +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
 +#define EXTRA_VECS 2
 +
 +static int __devinit enable_msix(struct adapter *adap)
 +{
 +	int ofld_need = 0;
 +	int i, err, want, need;
 +	struct sge *s = &adap->sge;
 +	unsigned int nchan = adap->params.nports;
 +	struct msix_entry entries[MAX_INGQ + 1];
 +
 +	for (i = 0; i < ARRAY_SIZE(entries); ++i)
 +		entries[i].entry = i;
 +
 +	want = s->max_ethqsets + EXTRA_VECS;
 +	if (is_offload(adap)) {
 +		want += s->rdmaqs + s->ofldqsets;
 +		/* need nchan for each possible ULD */
 +		ofld_need = 2 * nchan;
 +	}
 +	need = adap->params.nports + EXTRA_VECS + ofld_need;
 +
 +	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
 +		want = err;
 +
 +	if (!err) {
 +		/*
 +		 * Distribute available vectors to the various queue groups.
 +		 * Every group gets its minimum requirement and NIC gets top
 +		 * priority for leftovers.
 +		 */
 +		i = want - EXTRA_VECS - ofld_need;
 +		if (i < s->max_ethqsets) {
 +			s->max_ethqsets = i;
 +			if (i < s->ethqsets)
 +				reduce_ethqs(adap, i);
 +		}
 +		if (is_offload(adap)) {
 +			i = want - EXTRA_VECS - s->max_ethqsets;
 +			i -= ofld_need - nchan;
 +			s->ofldqsets = (i / nchan) * nchan;  /* round down */
 +		}
 +		for (i = 0; i < want; ++i)
 +			adap->msix_info[i].vec = entries[i].vector;
 +	} else if (err > 0)
 +		dev_info(adap->pdev_dev,
 +			 "only %d MSI-X vectors left, not using MSI-X\n", err);
 +	return err;
 +}
 +
 +#undef EXTRA_VECS
 +
 +static int __devinit init_rss(struct adapter *adap)
 +{
 +	unsigned int i, j;
 +
 +	for_each_port(adap, i) {
 +		struct port_info *pi = adap2pinfo(adap, i);
 +
 +		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
 +		if (!pi->rss)
 +			return -ENOMEM;
 +		for (j = 0; j < pi->rss_size; j++)
 +			pi->rss[j] = j % pi->nqsets;
 +	}
 +	return 0;
 +}
 +
 +static void __devinit print_port_info(const struct net_device *dev)
 +{
 +	static const char *base[] = {
 +		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
 +		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
 +	};
 +
 +	char buf[80];
 +	char *bufp = buf;
 +	const char *spd = "";
 +	const struct port_info *pi = netdev_priv(dev);
 +	const struct adapter *adap = pi->adapter;
 +
 +	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
 +		spd = " 2.5 GT/s";
 +	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
 +		spd = " 5 GT/s";
 +
 +	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
 +		bufp += sprintf(bufp, "100/");
 +	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
 +		bufp += sprintf(bufp, "1000/");
 +	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
 +		bufp += sprintf(bufp, "10G/");
 +	if (bufp != buf)
 +		--bufp;
 +	sprintf(bufp, "BASE-%s", base[pi->port_type]);
 +
 +	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
 +		    adap->params.vpd.id, adap->params.rev, buf,
 +		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
 +		    (adap->flags & USING_MSIX) ? " MSI-X" :
 +		    (adap->flags & USING_MSI) ? " MSI" : "");
 +	netdev_info(dev, "S/N: %s, E/C: %s\n",
 +		    adap->params.vpd.sn, adap->params.vpd.ec);
 +}
 +
 +static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
 +{
 +	u16 v;
 +	int pos;
 +
 +	pos = pci_pcie_cap(dev);
 +	if (pos > 0) {
 +		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
 +		v |= PCI_EXP_DEVCTL_RELAX_EN;
 +		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
 +	}
 +}
 +
 +/*
 + * Free the following resources:
 + * - memory used for tables
 + * - MSI/MSI-X
 + * - net devices
 + * - resources FW is holding for us
 + */
 +static void free_some_resources(struct adapter *adapter)
 +{
 +	unsigned int i;
 +
 +	t4_free_mem(adapter->l2t);
 +	t4_free_mem(adapter->tids.tid_tab);
 +	disable_msi(adapter);
 +
 +	for_each_port(adapter, i)
 +		if (adapter->port[i]) {
 +			kfree(adap2pinfo(adapter, i)->rss);
 +			free_netdev(adapter->port[i]);
 +		}
 +	if (adapter->flags & FW_OK)
 +		t4_fw_bye(adapter, adapter->fn);
 +}
 +
 +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
 +		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 +
 +static int __devinit init_one(struct pci_dev *pdev,
 +			      const struct pci_device_id *ent)
 +{
 +	int func, i, err;
 +	struct port_info *pi;
 +	unsigned int highdma = 0;
 +	struct adapter *adapter = NULL;
 +
 +	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 +
 +	err = pci_request_regions(pdev, KBUILD_MODNAME);
 +	if (err) {
 +		/* Just info, some other driver may have claimed the device. */
 +		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
 +		return err;
 +	}
 +
 +	/* We control everything through one PF */
 +	func = PCI_FUNC(pdev->devfn);
 +	if (func != ent->driver_data) {
 +		pci_save_state(pdev);        /* to restore SR-IOV later */
 +		goto sriov;
 +	}
 +
 +	err = pci_enable_device(pdev);
 +	if (err) {
 +		dev_err(&pdev->dev, "cannot enable PCI device\n");
 +		goto out_release_regions;
 +	}
 +
 +	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 +		highdma = NETIF_F_HIGHDMA;
 +		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 +		if (err) {
 +			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
 +				"coherent allocations\n");
 +			goto out_disable_device;
 +		}
 +	} else {
 +		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +		if (err) {
 +			dev_err(&pdev->dev, "no usable DMA configuration\n");
 +			goto out_disable_device;
 +		}
 +	}
 +
 +	pci_enable_pcie_error_reporting(pdev);
 +	enable_pcie_relaxed_ordering(pdev);
 +	pci_set_master(pdev);
 +	pci_save_state(pdev);
 +
 +	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 +	if (!adapter) {
 +		err = -ENOMEM;
 +		goto out_disable_device;
 +	}
 +
 +	adapter->regs = pci_ioremap_bar(pdev, 0);
 +	if (!adapter->regs) {
 +		dev_err(&pdev->dev, "cannot map device registers\n");
 +		err = -ENOMEM;
 +		goto out_free_adapter;
 +	}
 +
 +	adapter->pdev = pdev;
 +	adapter->pdev_dev = &pdev->dev;
 +	adapter->fn = func;
 +	adapter->msg_enable = dflt_msg_enable;
 +	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 +
 +	spin_lock_init(&adapter->stats_lock);
 +	spin_lock_init(&adapter->tid_release_lock);
 +
 +	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
 +
 +	err = t4_prep_adapter(adapter);
 +	if (err)
 +		goto out_unmap_bar;
 +	err = adap_init0(adapter);
 +	if (err)
 +		goto out_unmap_bar;
 +
 +	for_each_port(adapter, i) {
 +		struct net_device *netdev;
 +
 +		netdev = alloc_etherdev_mq(sizeof(struct port_info),
 +					   MAX_ETH_QSETS);
 +		if (!netdev) {
 +			err = -ENOMEM;
 +			goto out_free_dev;
 +		}
 +
 +		SET_NETDEV_DEV(netdev, &pdev->dev);
 +
 +		adapter->port[i] = netdev;
 +		pi = netdev_priv(netdev);
 +		pi->adapter = adapter;
 +		pi->xact_addr_filt = -1;
 +		pi->port_id = i;
 +		netdev->irq = pdev->irq;
 +
 +		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
 +			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +			NETIF_F_RXCSUM | NETIF_F_RXHASH |
 +			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +		netdev->features |= netdev->hw_features | highdma;
 +		netdev->vlan_features = netdev->features & VLAN_FEAT;
 +
 +		netdev->priv_flags |= IFF_UNICAST_FLT;
 +
 +		netdev->netdev_ops = &cxgb4_netdev_ops;
 +		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
 +	}
 +
 +	pci_set_drvdata(pdev, adapter);
 +
 +	if (adapter->flags & FW_OK) {
 +		err = t4_port_init(adapter, func, func, 0);
 +		if (err)
 +			goto out_free_dev;
 +	}
 +
 +	/*
 +	 * Configure queues and allocate tables now, they can be needed as
 +	 * soon as the first register_netdev completes.
 +	 */
 +	cfg_queues(adapter);
 +
 +	adapter->l2t = t4_init_l2t();
 +	if (!adapter->l2t) {
 +		/* We tolerate a lack of L2T, giving up some functionality */
 +		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
 +		adapter->params.offload = 0;
 +	}
 +
 +	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
 +		dev_warn(&pdev->dev, "could not allocate TID table, "
 +			 "continuing\n");
 +		adapter->params.offload = 0;
 +	}
 +
 +	/* See what interrupts we'll be using */
 +	if (msi > 1 && enable_msix(adapter) == 0)
 +		adapter->flags |= USING_MSIX;
 +	else if (msi > 0 && pci_enable_msi(pdev) == 0)
 +		adapter->flags |= USING_MSI;
 +
 +	err = init_rss(adapter);
 +	if (err)
 +		goto out_free_dev;
 +
 +	/*
 +	 * The card is now ready to go.  If any errors occur during device
 +	 * registration we do not fail the whole card but rather proceed only
 +	 * with the ports we manage to register successfully.  However we must
 +	 * register at least one net device.
 +	 */
 +	for_each_port(adapter, i) {
 +		pi = adap2pinfo(adapter, i);
 +		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
 +		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
 +
 +		err = register_netdev(adapter->port[i]);
 +		if (err)
 +			break;
 +		adapter->chan_map[pi->tx_chan] = i;
 +		print_port_info(adapter->port[i]);
 +	}
 +	if (i == 0) {
 +		dev_err(&pdev->dev, "could not register any net devices\n");
 +		goto out_free_dev;
 +	}
 +	if (err) {
 +		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
 +		err = 0;
 +	}
 +
 +	if (cxgb4_debugfs_root) {
 +		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
 +							   cxgb4_debugfs_root);
 +		setup_debugfs(adapter);
 +	}
 +
++	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
++	pdev->needs_freset = 1;
++
 +	if (is_offload(adapter))
 +		attach_ulds(adapter);
 +
 +sriov:
 +#ifdef CONFIG_PCI_IOV
 +	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
 +		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
 +			dev_info(&pdev->dev,
 +				 "instantiated %u virtual functions\n",
 +				 num_vf[func]);
 +#endif
 +	return 0;
 +
 + out_free_dev:
 +	free_some_resources(adapter);
 + out_unmap_bar:
 +	iounmap(adapter->regs);
 + out_free_adapter:
 +	kfree(adapter);
 + out_disable_device:
 +	pci_disable_pcie_error_reporting(pdev);
 +	pci_disable_device(pdev);
 + out_release_regions:
 +	pci_release_regions(pdev);
 +	pci_set_drvdata(pdev, NULL);
 +	return err;
 +}
 +
 +static void __devexit remove_one(struct pci_dev *pdev)
 +{
 +	struct adapter *adapter = pci_get_drvdata(pdev);
 +
 +	pci_disable_sriov(pdev);
 +
 +	if (adapter) {
 +		int i;
 +
 +		if (is_offload(adapter))
 +			detach_ulds(adapter);
 +
 +		for_each_port(adapter, i)
 +			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
 +				unregister_netdev(adapter->port[i]);
 +
 +		if (adapter->debugfs_root)
 +			debugfs_remove_recursive(adapter->debugfs_root);
 +
 +		if (adapter->flags & FULL_INIT_DONE)
 +			cxgb_down(adapter);
 +
 +		free_some_resources(adapter);
 +		iounmap(adapter->regs);
 +		kfree(adapter);
 +		pci_disable_pcie_error_reporting(pdev);
 +		pci_disable_device(pdev);
 +		pci_release_regions(pdev);
 +		pci_set_drvdata(pdev, NULL);
 +	} else
 +		pci_release_regions(pdev);
 +}
 +
 +static struct pci_driver cxgb4_driver = {
 +	.name     = KBUILD_MODNAME,
 +	.id_table = cxgb4_pci_tbl,
 +	.probe    = init_one,
 +	.remove   = __devexit_p(remove_one),
 +	.err_handler = &cxgb4_eeh,
 +};
 +
 +static int __init cxgb4_init_module(void)
 +{
 +	int ret;
 +
 +	/* Debugfs support is optional, just warn if this fails */
 +	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
 +	if (!cxgb4_debugfs_root)
 +		pr_warning("could not create debugfs entry, continuing\n");
 +
 +	ret = pci_register_driver(&cxgb4_driver);
 +	if (ret < 0)
 +		debugfs_remove(cxgb4_debugfs_root);
 +	return ret;
 +}
 +
 +static void __exit cxgb4_cleanup_module(void)
 +{
 +	pci_unregister_driver(&cxgb4_driver);
 +	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
 +}
 +
 +module_init(cxgb4_init_module);
 +module_exit(cxgb4_cleanup_module);
diff --combined drivers/net/ethernet/ibm/ibmveth.c
index 72b84de,0000000..4da972e
mode 100644,000000..100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@@ -1,1638 -1,0 +1,1638 @@@
 +/*
 + * IBM Power Virtual Ethernet Device Driver
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 + *
 + * Copyright (C) IBM Corporation, 2003, 2010
 + *
 + * Authors: Dave Larson <larson1 at us.ibm.com>
 + *	    Santiago Leon <santil at linux.vnet.ibm.com>
 + *	    Brian King <brking at linux.vnet.ibm.com>
 + *	    Robert Jennings <rcj at linux.vnet.ibm.com>
 + *	    Anton Blanchard <anton at au.ibm.com>
 + */
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/types.h>
 +#include <linux/errno.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/kernel.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/mm.h>
 +#include <linux/pm.h>
 +#include <linux/ethtool.h>
 +#include <linux/in.h>
 +#include <linux/ip.h>
 +#include <linux/ipv6.h>
 +#include <linux/slab.h>
 +#include <asm/hvcall.h>
 +#include <linux/atomic.h>
 +#include <asm/vio.h>
 +#include <asm/iommu.h>
 +#include <asm/firmware.h>
 +
 +#include "ibmveth.h"
 +
 +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 +
 +static struct kobj_type ktype_veth_pool;
 +
 +
 +static const char ibmveth_driver_name[] = "ibmveth";
 +static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
 +#define ibmveth_driver_version "1.04"
 +
 +MODULE_AUTHOR("Santiago Leon <santil at linux.vnet.ibm.com>");
 +MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(ibmveth_driver_version);
 +
 +static unsigned int tx_copybreak __read_mostly = 128;
 +module_param(tx_copybreak, uint, 0644);
 +MODULE_PARM_DESC(tx_copybreak,
 +	"Maximum size of packet that is copied to a new buffer on transmit");
 +
 +static unsigned int rx_copybreak __read_mostly = 128;
 +module_param(rx_copybreak, uint, 0644);
 +MODULE_PARM_DESC(rx_copybreak,
 +	"Maximum size of packet that is copied to a new buffer on receive");
 +
 +static unsigned int rx_flush __read_mostly = 0;
 +module_param(rx_flush, uint, 0644);
 +MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 +
 +struct ibmveth_stat {
 +	char name[ETH_GSTRING_LEN];
 +	int offset;
 +};
 +
 +#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
 +#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
 +
 +struct ibmveth_stat ibmveth_stats[] = {
 +	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
 +	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
 +	{ "replenish_add_buff_failure",
 +			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
 +	{ "replenish_add_buff_success",
 +			IBMVETH_STAT_OFF(replenish_add_buff_success) },
 +	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
 +	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
 +	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
 +	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
 +	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
 +	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
 +};
 +
 +/* simple methods of getting data from the current rxq entry */
 +static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 +{
 +	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
 +}
 +
 +static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
 +{
 +	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
 +			IBMVETH_RXQ_TOGGLE_SHIFT;
 +}
 +
 +static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 +{
 +	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
 +}
 +
 +static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 +{
 +	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
 +}
 +
 +static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 +{
 +	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
 +}
 +
 +static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 +{
 +	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
 +}
 +
 +static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
 +{
 +	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
 +}
 +
 +/* set up the initial settings for a buffer pool */
 +static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
 +				     u32 pool_index, u32 pool_size,
 +				     u32 buff_size, u32 pool_active)
 +{
 +	pool->size = pool_size;
 +	pool->index = pool_index;
 +	pool->buff_size = buff_size;
 +	pool->threshold = pool_size * 7 / 8;
 +	pool->active = pool_active;
 +}
 +
 +/* allocate and set up a buffer pool - called during open */
 +static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 +{
 +	int i;
 +
 +	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
 +
 +	if (!pool->free_map)
 +		return -1;
 +
 +	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
 +	if (!pool->dma_addr) {
 +		kfree(pool->free_map);
 +		pool->free_map = NULL;
 +		return -1;
 +	}
 +
 +	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
 +
 +	if (!pool->skbuff) {
 +		kfree(pool->dma_addr);
 +		pool->dma_addr = NULL;
 +
 +		kfree(pool->free_map);
 +		pool->free_map = NULL;
 +		return -1;
 +	}
 +
 +	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
 +
 +	for (i = 0; i < pool->size; ++i)
 +		pool->free_map[i] = i;
 +
 +	atomic_set(&pool->available, 0);
 +	pool->producer_index = 0;
 +	pool->consumer_index = 0;
 +
 +	return 0;
 +}
 +
 +static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
 +{
 +	unsigned long offset;
 +
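 +	/* dcbfl flushes a single data cache block, so step through the
 +	 * buffer one cache line at a time
 +	 */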
 +	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
 +		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
 +}
 +
 +/* replenish the buffers for a pool.  note that we don't need to
 + * skb_reserve these since they are used for incoming...
 + */
 +static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 +					  struct ibmveth_buff_pool *pool)
 +{
 +	u32 i;
 +	u32 count = pool->size - atomic_read(&pool->available);
 +	u32 buffers_added = 0;
 +	struct sk_buff *skb;
 +	unsigned int free_index, index;
 +	u64 correlator;
 +	unsigned long lpar_rc;
 +	dma_addr_t dma_addr;
 +
 +	mb();
 +
 +	for (i = 0; i < count; ++i) {
 +		union ibmveth_buf_desc desc;
 +
 +		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 +
 +		if (!skb) {
 +			netdev_dbg(adapter->netdev,
 +				   "replenish: unable to allocate skb\n");
 +			adapter->replenish_no_mem++;
 +			break;
 +		}
 +
 +		free_index = pool->consumer_index;
 +		pool->consumer_index++;
 +		if (pool->consumer_index >= pool->size)
 +			pool->consumer_index = 0;
 +		index = pool->free_map[free_index];
 +
 +		BUG_ON(index == IBM_VETH_INVALID_MAP);
 +		BUG_ON(pool->skbuff[index] != NULL);
 +
 +		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 +				pool->buff_size, DMA_FROM_DEVICE);
 +
 +		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +			goto failure;
 +
 +		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 +		pool->dma_addr[index] = dma_addr;
 +		pool->skbuff[index] = skb;
 +
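 +		/* the correlator packs the pool index into the high 32 bits
 +		 * and the buffer index into the low 32 bits, and is stored
 +		 * in the buffer itself so a returned frame can be mapped
 +		 * back to its pool slot
 +		 */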
 +		correlator = ((u64)pool->index << 32) | index;
 +		*(u64 *)skb->data = correlator;
 +
 +		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 +		desc.fields.address = dma_addr;
 +
 +		if (rx_flush) {
 +			unsigned int len = min(pool->buff_size,
 +						adapter->netdev->mtu +
 +						IBMVETH_BUFF_OH);
 +			ibmveth_flush_buffer(skb->data, len);
 +		}
 +		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
 +						   desc.desc);
 +
 +		if (lpar_rc != H_SUCCESS) {
 +			goto failure;
 +		} else {
 +			buffers_added++;
 +			adapter->replenish_add_buff_success++;
 +		}
 +	}
 +
 +	mb();
 +	atomic_add(buffers_added, &(pool->available));
 +	return;
 +
 +failure:
 +	pool->free_map[free_index] = index;
 +	pool->skbuff[index] = NULL;
 +	if (pool->consumer_index == 0)
 +		pool->consumer_index = pool->size - 1;
 +	else
 +		pool->consumer_index--;
 +	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +		dma_unmap_single(&adapter->vdev->dev,
 +		                 pool->dma_addr[index], pool->buff_size,
 +		                 DMA_FROM_DEVICE);
 +	dev_kfree_skb_any(skb);
 +	adapter->replenish_add_buff_failure++;
 +
 +	mb();
 +	atomic_add(buffers_added, &(pool->available));
 +}
 +
 +/* replenish routine */
 +static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 +{
 +	int i;
 +
 +	adapter->replenish_task_cycles++;
 +
 +	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
 +		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
 +
 +		if (pool->active &&
 +		    (atomic_read(&pool->available) < pool->threshold))
 +			ibmveth_replenish_buffer_pool(adapter, pool);
 +	}
 +
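 +	/* the no-buffer count is kept by firmware in the last 8 bytes
 +	 * of the buffer list page
 +	 */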
 +	adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
 +						4096 - 8);
 +}
 +
 +/* empty and free a buffer pool - also used to do cleanup in error paths */
 +static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
 +				     struct ibmveth_buff_pool *pool)
 +{
 +	int i;
 +
 +	kfree(pool->free_map);
 +	pool->free_map = NULL;
 +
 +	if (pool->skbuff && pool->dma_addr) {
 +		for (i = 0; i < pool->size; ++i) {
 +			struct sk_buff *skb = pool->skbuff[i];
 +			if (skb) {
 +				dma_unmap_single(&adapter->vdev->dev,
 +						 pool->dma_addr[i],
 +						 pool->buff_size,
 +						 DMA_FROM_DEVICE);
 +				dev_kfree_skb_any(skb);
 +				pool->skbuff[i] = NULL;
 +			}
 +		}
 +	}
 +
 +	if (pool->dma_addr) {
 +		kfree(pool->dma_addr);
 +		pool->dma_addr = NULL;
 +	}
 +
 +	if (pool->skbuff) {
 +		kfree(pool->skbuff);
 +		pool->skbuff = NULL;
 +	}
 +}
 +
 +/* remove a buffer from a pool */
 +static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
 +					    u64 correlator)
 +{
 +	unsigned int pool  = correlator >> 32;
 +	unsigned int index = correlator & 0xffffffffUL;
 +	unsigned int free_index;
 +	struct sk_buff *skb;
 +
 +	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +	skb = adapter->rx_buff_pool[pool].skbuff[index];
 +
 +	BUG_ON(skb == NULL);
 +
 +	adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 +
 +	dma_unmap_single(&adapter->vdev->dev,
 +			 adapter->rx_buff_pool[pool].dma_addr[index],
 +			 adapter->rx_buff_pool[pool].buff_size,
 +			 DMA_FROM_DEVICE);
 +
 +	free_index = adapter->rx_buff_pool[pool].producer_index;
 +	adapter->rx_buff_pool[pool].producer_index++;
 +	if (adapter->rx_buff_pool[pool].producer_index >=
 +	    adapter->rx_buff_pool[pool].size)
 +		adapter->rx_buff_pool[pool].producer_index = 0;
 +	adapter->rx_buff_pool[pool].free_map[free_index] = index;
 +
 +	mb();
 +
 +	atomic_dec(&(adapter->rx_buff_pool[pool].available));
 +}
 +
 +/* get the current buffer on the rx queue */
 +static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
 +{
 +	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
 +	unsigned int pool = correlator >> 32;
 +	unsigned int index = correlator & 0xffffffffUL;
 +
 +	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +	return adapter->rx_buff_pool[pool].skbuff[index];
 +}
 +
 +/* recycle the current buffer on the rx queue */
 +static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 +{
 +	u32 q_index = adapter->rx_queue.index;
 +	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
 +	unsigned int pool = correlator >> 32;
 +	unsigned int index = correlator & 0xffffffffUL;
 +	union ibmveth_buf_desc desc;
 +	unsigned long lpar_rc;
 +	int ret = 1;
 +
 +	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +	if (!adapter->rx_buff_pool[pool].active) {
 +		ibmveth_rxq_harvest_buffer(adapter);
 +		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
 +		goto out;
 +	}
 +
 +	desc.fields.flags_len = IBMVETH_BUF_VALID |
 +		adapter->rx_buff_pool[pool].buff_size;
 +	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
 +
 +	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 +
 +	if (lpar_rc != H_SUCCESS) {
 +		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
 +			   "during recycle rc=%ld", lpar_rc);
 +		ibmveth_remove_buffer_from_pool(adapter, correlator);
 +		ret = 0;
 +	}
 +
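 +	/* advance the queue index, flipping the expected toggle on wrap */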
 +	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 +		adapter->rx_queue.index = 0;
 +		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 +	}
 +
 +out:
 +	return ret;
 +}
 +
 +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 +{
 +	u64 correlator =
 +		adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
 +
 +	ibmveth_remove_buffer_from_pool(adapter, correlator);
 +
 +	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 +		adapter->rx_queue.index = 0;
 +		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 +	}
 +}
 +
 +static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 +{
 +	int i;
 +	struct device *dev = &adapter->vdev->dev;
 +
 +	if (adapter->buffer_list_addr != NULL) {
 +		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
 +			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 +					DMA_BIDIRECTIONAL);
 +			adapter->buffer_list_dma = DMA_ERROR_CODE;
 +		}
 +		free_page((unsigned long)adapter->buffer_list_addr);
 +		adapter->buffer_list_addr = NULL;
 +	}
 +
 +	if (adapter->filter_list_addr != NULL) {
 +		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
 +			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 +					DMA_BIDIRECTIONAL);
 +			adapter->filter_list_dma = DMA_ERROR_CODE;
 +		}
 +		free_page((unsigned long)adapter->filter_list_addr);
 +		adapter->filter_list_addr = NULL;
 +	}
 +
 +	if (adapter->rx_queue.queue_addr != NULL) {
 +		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
 +			dma_unmap_single(dev,
 +					adapter->rx_queue.queue_dma,
 +					adapter->rx_queue.queue_len,
 +					DMA_BIDIRECTIONAL);
 +			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 +		}
 +		kfree(adapter->rx_queue.queue_addr);
 +		adapter->rx_queue.queue_addr = NULL;
 +	}
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +		if (adapter->rx_buff_pool[i].active)
 +			ibmveth_free_buffer_pool(adapter,
 +						 &adapter->rx_buff_pool[i]);
 +
 +	if (adapter->bounce_buffer != NULL) {
 +		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 +			dma_unmap_single(&adapter->vdev->dev,
 +					adapter->bounce_buffer_dma,
 +					adapter->netdev->mtu + IBMVETH_BUFF_OH,
 +					DMA_BIDIRECTIONAL);
 +			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
 +		}
 +		kfree(adapter->bounce_buffer);
 +		adapter->bounce_buffer = NULL;
 +	}
 +}
 +
 +static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 +        union ibmveth_buf_desc rxq_desc, u64 mac_address)
 +{
 +	int rc, try_again = 1;
 +
 +	/*
 +	 * After a kexec the adapter will still be open, so our attempt to
 +	 * open it will fail. If that happens we free the adapter and try
 +	 * again, but only once.
 +	 */
 +retry:
 +	rc = h_register_logical_lan(adapter->vdev->unit_address,
 +				    adapter->buffer_list_dma, rxq_desc.desc,
 +				    adapter->filter_list_dma, mac_address);
 +
 +	if (rc != H_SUCCESS && try_again) {
 +		do {
 +			rc = h_free_logical_lan(adapter->vdev->unit_address);
 +		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 +
 +		try_again = 0;
 +		goto retry;
 +	}
 +
 +	return rc;
 +}
 +
 +static int ibmveth_open(struct net_device *netdev)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	u64 mac_address = 0;
 +	int rxq_entries = 1;
 +	unsigned long lpar_rc;
 +	int rc;
 +	union ibmveth_buf_desc rxq_desc;
 +	int i;
 +	struct device *dev;
 +
 +	netdev_dbg(netdev, "open starting\n");
 +
 +	napi_enable(&adapter->napi);
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +		rxq_entries += adapter->rx_buff_pool[i].size;
 +
 +	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 +	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 +
 +	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 +		netdev_err(netdev, "unable to allocate filter or buffer list "
 +			   "pages\n");
 +		rc = -ENOMEM;
 +		goto err_out;
 +	}
 +
 +	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
 +						rxq_entries;
 +	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
 +						GFP_KERNEL);
 +
 +	if (!adapter->rx_queue.queue_addr) {
 +		netdev_err(netdev, "unable to allocate rx queue pages\n");
 +		rc = -ENOMEM;
 +		goto err_out;
 +	}
 +
 +	dev = &adapter->vdev->dev;
 +
 +	adapter->buffer_list_dma = dma_map_single(dev,
 +			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
 +	adapter->filter_list_dma = dma_map_single(dev,
 +			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
 +	adapter->rx_queue.queue_dma = dma_map_single(dev,
 +			adapter->rx_queue.queue_addr,
 +			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 +
 +	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
 +	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
 +	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 +		netdev_err(netdev, "unable to map filter or buffer list "
 +			   "pages\n");
 +		rc = -ENOMEM;
 +		goto err_out;
 +	}
 +
 +	adapter->rx_queue.index = 0;
 +	adapter->rx_queue.num_slots = rxq_entries;
 +	adapter->rx_queue.toggle = 1;
 +
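 +	/* copy the 6-byte MAC into the top of a u64 and shift it down
 +	 * into the low 48 bits (this runs big-endian on POWER)
 +	 */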
 +	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 +	mac_address = mac_address >> 16;
 +
 +	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
 +					adapter->rx_queue.queue_len;
 +	rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 +
 +	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
 +	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
 +	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 +
 +	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 +
 +	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 +
 +	if (lpar_rc != H_SUCCESS) {
 +		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
 +			   lpar_rc);
 +		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
 +			   "desc:0x%llx MAC:0x%llx\n",
 +				     adapter->buffer_list_dma,
 +				     adapter->filter_list_dma,
 +				     rxq_desc.desc,
 +				     mac_address);
 +		rc = -ENONET;
 +		goto err_out;
 +	}
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +		if (!adapter->rx_buff_pool[i].active)
 +			continue;
 +		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
 +			netdev_err(netdev, "unable to alloc pool\n");
 +			adapter->rx_buff_pool[i].active = 0;
 +			rc = -ENOMEM;
 +			goto err_out;
 +		}
 +	}
 +
 +	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
 +	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
 +			 netdev);
 +	if (rc != 0) {
 +		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
 +			   netdev->irq, rc);
 +		do {
- 			rc = h_free_logical_lan(adapter->vdev->unit_address);
- 		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
++			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
++		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 +
 +		goto err_out;
 +	}
 +
 +	adapter->bounce_buffer =
 +	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 +	if (!adapter->bounce_buffer) {
 +		netdev_err(netdev, "unable to allocate bounce buffer\n");
 +		rc = -ENOMEM;
 +		goto err_out_free_irq;
 +	}
 +	adapter->bounce_buffer_dma =
 +	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 +			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 +	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 +		netdev_err(netdev, "unable to map bounce buffer\n");
 +		rc = -ENOMEM;
 +		goto err_out_free_irq;
 +	}
 +
 +	netdev_dbg(netdev, "initial replenish cycle\n");
 +	ibmveth_interrupt(netdev->irq, netdev);
 +
 +	netif_start_queue(netdev);
 +
 +	netdev_dbg(netdev, "open complete\n");
 +
 +	return 0;
 +
 +err_out_free_irq:
 +	free_irq(netdev->irq, netdev);
 +err_out:
 +	ibmveth_cleanup(adapter);
 +	napi_disable(&adapter->napi);
 +	return rc;
 +}
 +
 +static int ibmveth_close(struct net_device *netdev)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	long lpar_rc;
 +
 +	netdev_dbg(netdev, "close starting\n");
 +
 +	napi_disable(&adapter->napi);
 +
 +	if (!adapter->pool_config)
 +		netif_stop_queue(netdev);
 +
 +	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 +
 +	do {
 +		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 +	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 +
 +	if (lpar_rc != H_SUCCESS) {
 +		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
 +			   "continuing with close\n", lpar_rc);
 +	}
 +
 +	free_irq(netdev->irq, netdev);
 +
 +	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
 +						4096 - 8);
 +
 +	ibmveth_cleanup(adapter);
 +
 +	netdev_dbg(netdev, "close complete\n");
 +
 +	return 0;
 +}
 +
 +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
 +				SUPPORTED_FIBRE);
 +	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
 +				ADVERTISED_FIBRE);
 +	ethtool_cmd_speed_set(cmd, SPEED_1000);
 +	cmd->duplex = DUPLEX_FULL;
 +	cmd->port = PORT_FIBRE;
 +	cmd->phy_address = 0;
 +	cmd->transceiver = XCVR_INTERNAL;
 +	cmd->autoneg = AUTONEG_ENABLE;
 +	cmd->maxtxpkt = 0;
 +	cmd->maxrxpkt = 1;
 +	return 0;
 +}
 +
 +static void netdev_get_drvinfo(struct net_device *dev,
 +			       struct ethtool_drvinfo *info)
 +{
 +	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
 +	strncpy(info->version, ibmveth_driver_version,
 +		sizeof(info->version) - 1);
 +}
 +
 +static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
 +{
 +	/*
 +	 * Since the ibmveth firmware interface does not have the
 +	 * concept of separate tx/rx checksum offload enable, if rx
 +	 * checksum is disabled we also have to disable tx checksum
 +	 * offload. Once we disable rx checksum offload, we are no
 +	 * longer allowed to send tx buffers that are not properly
 +	 * checksummed.
 +	 */
 +
 +	if (!(features & NETIF_F_RXCSUM))
 +		features &= ~NETIF_F_ALL_CSUM;
 +
 +	return features;
 +}
 +
 +static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(dev);
 +	unsigned long set_attr, clr_attr, ret_attr;
 +	unsigned long set_attr6, clr_attr6;
 +	long ret, ret4, ret6;
 +	int rc1 = 0, rc2 = 0;
 +	int restart = 0;
 +
 +	if (netif_running(dev)) {
 +		restart = 1;
 +		adapter->pool_config = 1;
 +		ibmveth_close(dev);
 +		adapter->pool_config = 0;
 +	}
 +
 +	set_attr = 0;
 +	clr_attr = 0;
 +	set_attr6 = 0;
 +	clr_attr6 = 0;
 +
 +	if (data) {
 +		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 +		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
 +	} else {
 +		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 +		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
 +	}
 +
 +	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
 +
 +	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
 +	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
 +	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
 +		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
 +					 set_attr, &ret_attr);
 +
 +		if (ret4 != H_SUCCESS) {
 +			netdev_err(dev, "unable to change IPv4 checksum "
 +					"offload settings. %d rc=%ld\n",
 +					data, ret4);
 +
 +			h_illan_attributes(adapter->vdev->unit_address,
 +					   set_attr, clr_attr, &ret_attr);
 +
 +			if (data == 1)
 +				dev->features &= ~NETIF_F_IP_CSUM;
 +
 +		} else {
 +			adapter->fw_ipv4_csum_support = data;
 +		}
 +
 +		ret6 = h_illan_attributes(adapter->vdev->unit_address,
 +					 clr_attr6, set_attr6, &ret_attr);
 +
 +		if (ret6 != H_SUCCESS) {
 +			netdev_err(dev, "unable to change IPv6 checksum "
 +					"offload settings. %d rc=%ld\n",
 +					data, ret6);
 +
 +			h_illan_attributes(adapter->vdev->unit_address,
 +					   set_attr6, clr_attr6, &ret_attr);
 +
 +			if (data == 1)
 +				dev->features &= ~NETIF_F_IPV6_CSUM;
 +
 +		} else
 +			adapter->fw_ipv6_csum_support = data;
 +
 +		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
 +			adapter->rx_csum = data;
 +		else
 +			rc1 = -EIO;
 +	} else {
 +		rc1 = -EIO;
 +		netdev_err(dev, "unable to change checksum offload settings."
 +				     " %d rc=%ld ret_attr=%lx\n", data, ret,
 +				     ret_attr);
 +	}
 +
 +	if (restart)
 +		rc2 = ibmveth_open(dev);
 +
 +	return rc1 ? rc1 : rc2;
 +}
 +
 +static int ibmveth_set_features(struct net_device *dev, u32 features)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(dev);
 +	int rx_csum = !!(features & NETIF_F_RXCSUM);
 +	int rc;
 +
 +	if (rx_csum == adapter->rx_csum)
 +		return 0;
 +
 +	rc = ibmveth_set_csum_offload(dev, rx_csum);
 +	if (rc && !adapter->rx_csum)
 +		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
 +
 +	return rc;
 +}
 +
 +static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 +{
 +	int i;
 +
 +	if (stringset != ETH_SS_STATS)
 +		return;
 +
 +	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
 +		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
 +}
 +
 +static int ibmveth_get_sset_count(struct net_device *dev, int sset)
 +{
 +	switch (sset) {
 +	case ETH_SS_STATS:
 +		return ARRAY_SIZE(ibmveth_stats);
 +	default:
 +		return -EOPNOTSUPP;
 +	}
 +}
 +
 +static void ibmveth_get_ethtool_stats(struct net_device *dev,
 +				      struct ethtool_stats *stats, u64 *data)
 +{
 +	int i;
 +	struct ibmveth_adapter *adapter = netdev_priv(dev);
 +
 +	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
 +		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
 +}
 +
 +static const struct ethtool_ops netdev_ethtool_ops = {
 +	.get_drvinfo		= netdev_get_drvinfo,
 +	.get_settings		= netdev_get_settings,
 +	.get_link		= ethtool_op_get_link,
 +	.get_strings		= ibmveth_get_strings,
 +	.get_sset_count		= ibmveth_get_sset_count,
 +	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
 +};
 +
 +static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +	return -EOPNOTSUPP;
 +}
 +
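 +/* byte offset of an address within a 4K page */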
 +#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 +
 +static int ibmveth_send(struct ibmveth_adapter *adapter,
 +			union ibmveth_buf_desc *descs)
 +{
 +	unsigned long correlator;
 +	unsigned int retry_count;
 +	unsigned long ret;
 +
 +	/*
 +	 * The retry count sets a maximum for the number of broadcast and
 +	 * multicast destinations within the system.
 +	 */
 +	retry_count = 1024;
 +	correlator = 0;
 +	do {
 +		ret = h_send_logical_lan(adapter->vdev->unit_address,
 +					     descs[0].desc, descs[1].desc,
 +					     descs[2].desc, descs[3].desc,
 +					     descs[4].desc, descs[5].desc,
 +					     correlator, &correlator);
 +	} while ((ret == H_BUSY) && (retry_count--));
 +
 +	if (ret != H_SUCCESS && ret != H_DROPPED) {
 +		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
 +			   "with rc=%ld\n", ret);
 +		return 1;
 +	}
 +
 +	return 0;
 +}
 +
 +static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 +				      struct net_device *netdev)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	unsigned int desc_flags;
 +	union ibmveth_buf_desc descs[6];
 +	int last, i;
 +	int force_bounce = 0;
 +	dma_addr_t dma_addr;
 +
 +	/*
 +	 * veth handles a maximum of 6 segments including the header, so
 +	 * we have to linearize the skb if there are more than this.
 +	 */
 +	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
 +		netdev->stats.tx_dropped++;
 +		goto out;
 +	}
 +
 +	/* veth can't checksum offload UDP */
 +	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 +	    ((skb->protocol == htons(ETH_P_IP) &&
 +	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
 +	     (skb->protocol == htons(ETH_P_IPV6) &&
 +	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
 +	    skb_checksum_help(skb)) {
 +
 +		netdev_err(netdev, "tx: failed to checksum packet\n");
 +		netdev->stats.tx_dropped++;
 +		goto out;
 +	}
 +
 +	desc_flags = IBMVETH_BUF_VALID;
 +
 +	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +		unsigned char *buf = skb_transport_header(skb) +
 +						skb->csum_offset;
 +
 +		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
 +
 +		/* Need to zero out the checksum */
 +		buf[0] = 0;
 +		buf[1] = 0;
 +	}
 +
 +retry_bounce:
 +	memset(descs, 0, sizeof(descs));
 +
 +	/*
 +	 * If a linear packet is below the rx threshold then
 +	 * copy it into the static bounce buffer. This avoids the
 +	 * cost of a TCE insert and remove.
 +	 */
 +	if (force_bounce || (!skb_is_nonlinear(skb) &&
 +				(skb->len < tx_copybreak))) {
 +		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
 +					  skb->len);
 +
 +		descs[0].fields.flags_len = desc_flags | skb->len;
 +		descs[0].fields.address = adapter->bounce_buffer_dma;
 +
 +		if (ibmveth_send(adapter, descs)) {
 +			adapter->tx_send_failed++;
 +			netdev->stats.tx_dropped++;
 +		} else {
 +			netdev->stats.tx_packets++;
 +			netdev->stats.tx_bytes += skb->len;
 +		}
 +
 +		goto out;
 +	}
 +
 +	/* Map the header */
 +	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 +				  skb_headlen(skb), DMA_TO_DEVICE);
 +	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +		goto map_failed;
 +
 +	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
 +	descs[0].fields.address = dma_addr;
 +
 +	/* Map the frags */
 +	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
 +					    frag->size, DMA_TO_DEVICE);
 +
 +		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +			goto map_failed_frags;
 +
 +		descs[i+1].fields.flags_len = desc_flags | frag->size;
 +		descs[i+1].fields.address = dma_addr;
 +	}
 +
 +	if (ibmveth_send(adapter, descs)) {
 +		adapter->tx_send_failed++;
 +		netdev->stats.tx_dropped++;
 +	} else {
 +		netdev->stats.tx_packets++;
 +		netdev->stats.tx_bytes += skb->len;
 +	}
 +
 +	dma_unmap_single(&adapter->vdev->dev,
 +			 descs[0].fields.address,
 +			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +			 DMA_TO_DEVICE);
 +
 +	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
 +		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 +			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +			       DMA_TO_DEVICE);
 +
 +out:
 +	dev_kfree_skb(skb);
 +	return NETDEV_TX_OK;
 +
 +map_failed_frags:
 +	last = i+1;
 +	/* descs[0] was mapped with dma_map_single(), not as a page */
 +	for (i = 1; i < last; i++)
 +		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 +			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +			       DMA_TO_DEVICE);
 +
 +	dma_unmap_single(&adapter->vdev->dev, descs[0].fields.address,
 +			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +			 DMA_TO_DEVICE);
 +
 +map_failed:
 +	if (!firmware_has_feature(FW_FEATURE_CMO))
 +		netdev_err(netdev, "tx: unable to map xmit buffer\n");
 +	adapter->tx_map_failed++;
 +	skb_linearize(skb);
 +	force_bounce = 1;
 +	goto retry_bounce;
 +}
 +
 +static int ibmveth_poll(struct napi_struct *napi, int budget)
 +{
 +	struct ibmveth_adapter *adapter =
 +			container_of(napi, struct ibmveth_adapter, napi);
 +	struct net_device *netdev = adapter->netdev;
 +	int frames_processed = 0;
 +	unsigned long lpar_rc;
 +
 +restart_poll:
 +	do {
 +		if (!ibmveth_rxq_pending_buffer(adapter))
 +			break;
 +
 +		smp_rmb();
 +		if (!ibmveth_rxq_buffer_valid(adapter)) {
 +			wmb(); /* suggested by larson1 */
 +			adapter->rx_invalid_buffer++;
 +			netdev_dbg(netdev, "recycling invalid buffer\n");
 +			ibmveth_rxq_recycle_buffer(adapter);
 +		} else {
 +			struct sk_buff *skb, *new_skb;
 +			int length = ibmveth_rxq_frame_length(adapter);
 +			int offset = ibmveth_rxq_frame_offset(adapter);
 +			int csum_good = ibmveth_rxq_csum_good(adapter);
 +
 +			skb = ibmveth_rxq_get_buffer(adapter);
 +
 +			new_skb = NULL;
 +			if (length < rx_copybreak)
 +				new_skb = netdev_alloc_skb(netdev, length);
 +
 +			if (new_skb) {
 +				skb_copy_to_linear_data(new_skb,
 +							skb->data + offset,
 +							length);
 +				if (rx_flush)
 +					ibmveth_flush_buffer(skb->data,
 +						length + offset);
 +				if (!ibmveth_rxq_recycle_buffer(adapter))
 +					kfree_skb(skb);
 +				skb = new_skb;
 +			} else {
 +				ibmveth_rxq_harvest_buffer(adapter);
 +				skb_reserve(skb, offset);
 +			}
 +
 +			skb_put(skb, length);
 +			skb->protocol = eth_type_trans(skb, netdev);
 +
 +			if (csum_good)
 +				skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +			netif_receive_skb(skb);	/* send it up */
 +
 +			netdev->stats.rx_packets++;
 +			netdev->stats.rx_bytes += length;
 +			frames_processed++;
 +		}
 +	} while (frames_processed < budget);
 +
 +	ibmveth_replenish_task(adapter);
 +
 +	if (frames_processed < budget) {
 +		/* We think we are done - reenable interrupts,
 +		 * then check once more to make sure we are done.
 +		 */
 +		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +				       VIO_IRQ_ENABLE);
 +
 +		BUG_ON(lpar_rc != H_SUCCESS);
 +
 +		napi_complete(napi);
 +
 +		if (ibmveth_rxq_pending_buffer(adapter) &&
 +		    napi_reschedule(napi)) {
 +			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +					       VIO_IRQ_DISABLE);
 +			goto restart_poll;
 +		}
 +	}
 +
 +	return frames_processed;
 +}
 +
 +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 +{
 +	struct net_device *netdev = dev_instance;
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	unsigned long lpar_rc;
 +
 +	if (napi_schedule_prep(&adapter->napi)) {
 +		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +				       VIO_IRQ_DISABLE);
 +		BUG_ON(lpar_rc != H_SUCCESS);
 +		__napi_schedule(&adapter->napi);
 +	}
 +	return IRQ_HANDLED;
 +}
 +
 +static void ibmveth_set_multicast_list(struct net_device *netdev)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	unsigned long lpar_rc;
 +
 +	if ((netdev->flags & IFF_PROMISC) ||
 +	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
 +		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +					   IbmVethMcastEnableRecv |
 +					   IbmVethMcastDisableFiltering,
 +					   0);
 +		if (lpar_rc != H_SUCCESS) {
 +			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +				   "entering promisc mode\n", lpar_rc);
 +		}
 +	} else {
 +		struct netdev_hw_addr *ha;
 +		/* clear the filter table & disable filtering */
 +		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +					   IbmVethMcastEnableRecv |
 +					   IbmVethMcastDisableFiltering |
 +					   IbmVethMcastClearFilterTable,
 +					   0);
 +		if (lpar_rc != H_SUCCESS) {
 +			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +				   "attempting to clear filter table\n",
 +				   lpar_rc);
 +		}
 +		/* add the addresses to the filter table */
 +		netdev_for_each_mc_addr(ha, netdev) {
 +			/* add the multicast address to the filter table */
 +			unsigned long mcast_addr = 0;
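 +			/* pack the 6-byte address into the low-order
 +			 * bytes of the u64 (big-endian)
 +			 */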
 +			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
 +			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +						   IbmVethMcastAddFilter,
 +						   mcast_addr);
 +			if (lpar_rc != H_SUCCESS) {
 +				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
 +					   "when adding an entry to the filter "
 +					   "table\n", lpar_rc);
 +			}
 +		}
 +
 +		/* re-enable filtering */
 +		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +					   IbmVethMcastEnableFiltering,
 +					   0);
 +		if (lpar_rc != H_SUCCESS) {
 +			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +				   "enabling filtering\n", lpar_rc);
 +		}
 +	}
 +}
 +
 +static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +	struct ibmveth_adapter *adapter = netdev_priv(dev);
 +	struct vio_dev *viodev = adapter->vdev;
 +	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
 +	int i, rc;
 +	int need_restart = 0;
 +
 +	if (new_mtu < IBMVETH_MIN_MTU)
 +		return -EINVAL;
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
 +			break;
 +
 +	if (i == IBMVETH_NUM_BUFF_POOLS)
 +		return -EINVAL;
 +
 +	/* Deactivate all the buffer pools so that the next loop can activate
 +	   only the buffer pools necessary to hold the new MTU */
 +	if (netif_running(adapter->netdev)) {
 +		need_restart = 1;
 +		adapter->pool_config = 1;
 +		ibmveth_close(adapter->netdev);
 +		adapter->pool_config = 0;
 +	}
 +
 +	/* Look for an active buffer pool that can hold the new MTU */
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +		adapter->rx_buff_pool[i].active = 1;
 +
 +		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
 +			dev->mtu = new_mtu;
 +			vio_cmo_set_dev_desired(viodev,
 +						ibmveth_get_desired_dma
 +						(viodev));
 +			if (need_restart) {
 +				return ibmveth_open(adapter->netdev);
 +			}
 +			return 0;
 +		}
 +	}
 +
 +	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
 +		return rc;
 +
 +	return -EINVAL;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void ibmveth_poll_controller(struct net_device *dev)
 +{
 +	ibmveth_replenish_task(netdev_priv(dev));
 +	ibmveth_interrupt(dev->irq, dev);
 +}
 +#endif
 +
 +/**
 + * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 + *
 + * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 + *
 + * Return value:
 + *	Number of bytes of IO data the driver will need to perform well.
 + */
 +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 +{
 +	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
 +	struct ibmveth_adapter *adapter;
 +	unsigned long ret;
 +	int i;
 +	int rxqentries = 1;
 +
 +	/* netdev inits at probe time along with the structures we need below*/
 +	if (netdev == NULL)
 +		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 +
 +	adapter = netdev_priv(netdev);
 +
 +	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
 +	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +		/* add the size of the active receive buffers */
 +		if (adapter->rx_buff_pool[i].active)
 +			ret +=
 +			    adapter->rx_buff_pool[i].size *
 +			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
 +			            buff_size);
 +		rxqentries += adapter->rx_buff_pool[i].size;
 +	}
 +	/* add the size of the receive queue entries */
 +	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
 +
 +	return ret;
 +}
 +
 +static const struct net_device_ops ibmveth_netdev_ops = {
 +	.ndo_open		= ibmveth_open,
 +	.ndo_stop		= ibmveth_close,
 +	.ndo_start_xmit		= ibmveth_start_xmit,
 +	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
 +	.ndo_do_ioctl		= ibmveth_ioctl,
 +	.ndo_change_mtu		= ibmveth_change_mtu,
 +	.ndo_fix_features	= ibmveth_fix_features,
 +	.ndo_set_features	= ibmveth_set_features,
 +	.ndo_validate_addr	= eth_validate_addr,
 +	.ndo_set_mac_address	= eth_mac_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +	.ndo_poll_controller	= ibmveth_poll_controller,
 +#endif
 +};
 +
 +static int __devinit ibmveth_probe(struct vio_dev *dev,
 +				   const struct vio_device_id *id)
 +{
 +	int rc, i;
 +	struct net_device *netdev;
 +	struct ibmveth_adapter *adapter;
 +	unsigned char *mac_addr_p;
 +	unsigned int *mcastFilterSize_p;
 +
 +	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
 +		dev->unit_address);
 +
 +	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
 +							NULL);
 +	if (!mac_addr_p) {
 +		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
 +		return -EINVAL;
 +	}
 +
 +	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
 +						VETH_MCAST_FILTER_SIZE, NULL);
 +	if (!mcastFilterSize_p) {
 +		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 +			"attribute\n");
 +		return -EINVAL;
 +	}
 +
 +	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
 +
 +	if (!netdev)
 +		return -ENOMEM;
 +
 +	adapter = netdev_priv(netdev);
 +	dev_set_drvdata(&dev->dev, netdev);
 +
 +	adapter->vdev = dev;
 +	adapter->netdev = netdev;
 +	adapter->mcastFilterSize = *mcastFilterSize_p;
 +	adapter->pool_config = 0;
 +
 +	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 +
 +	/*
 +	 * Some older boxes running PHYP non-natively have an OF that returns
 +	 * an 8-byte local-mac-address field (and the first 2 bytes have to be
 +	 * ignored) while newer boxes' OF return a 6-byte field. Note that
 +	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
 +	 * The RPA doc specifies that the first byte must be 10b, so we'll
 +	 * just look for it to solve this 8 vs. 6 byte field issue
 +	 */
 +	if ((*mac_addr_p & 0x3) != 0x02)
 +		mac_addr_p += 2;
 +
 +	adapter->mac_addr = 0;
 +	memcpy(&adapter->mac_addr, mac_addr_p, 6);
 +
 +	netdev->irq = dev->irq;
 +	netdev->netdev_ops = &ibmveth_netdev_ops;
 +	netdev->ethtool_ops = &netdev_ethtool_ops;
 +	SET_NETDEV_DEV(netdev, &dev->dev);
 +	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
 +		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 +	netdev->features |= netdev->hw_features;
 +
 +	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
 +		int error;
 +
 +		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
 +					 pool_count[i], pool_size[i],
 +					 pool_active[i]);
 +		error = kobject_init_and_add(kobj, &ktype_veth_pool,
 +					     &dev->dev.kobj, "pool%d", i);
 +		if (!error)
 +			kobject_uevent(kobj, KOBJ_ADD);
 +	}
 +
 +	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
 +
 +	adapter->buffer_list_dma = DMA_ERROR_CODE;
 +	adapter->filter_list_dma = DMA_ERROR_CODE;
 +	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 +
 +	netdev_dbg(netdev, "registering netdev...\n");
 +
 +	ibmveth_set_features(netdev, netdev->features);
 +
 +	rc = register_netdev(netdev);
 +
 +	if (rc) {
 +		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
 +		free_netdev(netdev);
 +		return rc;
 +	}
 +
 +	netdev_dbg(netdev, "registered\n");
 +
 +	return 0;
 +}
 +
 +static int __devexit ibmveth_remove(struct vio_dev *dev)
 +{
 +	struct net_device *netdev = dev_get_drvdata(&dev->dev);
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	int i;
 +
 +	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +		kobject_put(&adapter->rx_buff_pool[i].kobj);
 +
 +	unregister_netdev(netdev);
 +
 +	free_netdev(netdev);
 +	dev_set_drvdata(&dev->dev, NULL);
 +
 +	return 0;
 +}
 +
 +static struct attribute veth_active_attr;
 +static struct attribute veth_num_attr;
 +static struct attribute veth_size_attr;
 +
 +static ssize_t veth_pool_show(struct kobject *kobj,
 +			      struct attribute *attr, char *buf)
 +{
 +	struct ibmveth_buff_pool *pool = container_of(kobj,
 +						      struct ibmveth_buff_pool,
 +						      kobj);
 +
 +	if (attr == &veth_active_attr)
 +		return sprintf(buf, "%d\n", pool->active);
 +	else if (attr == &veth_num_attr)
 +		return sprintf(buf, "%d\n", pool->size);
 +	else if (attr == &veth_size_attr)
 +		return sprintf(buf, "%d\n", pool->buff_size);
 +	return 0;
 +}
 +
 +static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 +			       const char *buf, size_t count)
 +{
 +	struct ibmveth_buff_pool *pool = container_of(kobj,
 +						      struct ibmveth_buff_pool,
 +						      kobj);
 +	struct net_device *netdev = dev_get_drvdata(
 +	    container_of(kobj->parent, struct device, kobj));
 +	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +	long value = simple_strtol(buf, NULL, 10);
 +	long rc;
 +
 +	if (attr == &veth_active_attr) {
 +		if (value && !pool->active) {
 +			if (netif_running(netdev)) {
 +				if (ibmveth_alloc_buffer_pool(pool)) {
 +					netdev_err(netdev,
 +						   "unable to alloc pool\n");
 +					return -ENOMEM;
 +				}
 +				pool->active = 1;
 +				adapter->pool_config = 1;
 +				ibmveth_close(netdev);
 +				adapter->pool_config = 0;
 +				if ((rc = ibmveth_open(netdev)))
 +					return rc;
 +			} else {
 +				pool->active = 1;
 +			}
 +		} else if (!value && pool->active) {
 +			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
 +			int i;
 +			/* Make sure there is a buffer pool with buffers that
 +			   can hold a packet of the size of the MTU */
 +			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +				if (pool == &adapter->rx_buff_pool[i])
 +					continue;
 +				if (!adapter->rx_buff_pool[i].active)
 +					continue;
 +				if (mtu <= adapter->rx_buff_pool[i].buff_size)
 +					break;
 +			}
 +
 +			if (i == IBMVETH_NUM_BUFF_POOLS) {
 +				netdev_err(netdev, "no active pool >= MTU\n");
 +				return -EPERM;
 +			}
 +
 +			if (netif_running(netdev)) {
 +				adapter->pool_config = 1;
 +				ibmveth_close(netdev);
 +				pool->active = 0;
 +				adapter->pool_config = 0;
 +				if ((rc = ibmveth_open(netdev)))
 +					return rc;
 +			}
 +			pool->active = 0;
 +		}
 +	} else if (attr == &veth_num_attr) {
 +		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
 +			return -EINVAL;
 +		} else {
 +			if (netif_running(netdev)) {
 +				adapter->pool_config = 1;
 +				ibmveth_close(netdev);
 +				adapter->pool_config = 0;
 +				pool->size = value;
 +				if ((rc = ibmveth_open(netdev)))
 +					return rc;
 +			} else {
 +				pool->size = value;
 +			}
 +		}
 +	} else if (attr == &veth_size_attr) {
 +		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
 +			return -EINVAL;
 +		} else {
 +			if (netif_running(netdev)) {
 +				adapter->pool_config = 1;
 +				ibmveth_close(netdev);
 +				adapter->pool_config = 0;
 +				pool->buff_size = value;
 +				if ((rc = ibmveth_open(netdev)))
 +					return rc;
 +			} else {
 +				pool->buff_size = value;
 +			}
 +		}
 +	}
 +
 +	/* kick the interrupt handler to allocate/deallocate pools */
 +	ibmveth_interrupt(netdev->irq, netdev);
 +	return count;
 +}
 +
 +
 +#define ATTR(_name, _mode)				\
 +	struct attribute veth_##_name##_attr = {	\
 +	.name = __stringify(_name), .mode = _mode,	\
 +	};
 +
 +static ATTR(active, 0644);
 +static ATTR(num, 0644);
 +static ATTR(size, 0644);
 +
 +static struct attribute *veth_pool_attrs[] = {
 +	&veth_active_attr,
 +	&veth_num_attr,
 +	&veth_size_attr,
 +	NULL,
 +};
 +
 +static const struct sysfs_ops veth_pool_ops = {
 +	.show   = veth_pool_show,
 +	.store  = veth_pool_store,
 +};
 +
 +static struct kobj_type ktype_veth_pool = {
 +	.release        = NULL,
 +	.sysfs_ops      = &veth_pool_ops,
 +	.default_attrs  = veth_pool_attrs,
 +};
 +
 +static int ibmveth_resume(struct device *dev)
 +{
 +	struct net_device *netdev = dev_get_drvdata(dev);
 +	ibmveth_interrupt(netdev->irq, netdev);
 +	return 0;
 +}
 +
 +static struct vio_device_id ibmveth_device_table[] __devinitdata = {
 +	{ "network", "IBM,l-lan"},
 +	{ "", "" }
 +};
 +MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
 +
 +static struct dev_pm_ops ibmveth_pm_ops = {
 +	.resume = ibmveth_resume
 +};
 +
 +static struct vio_driver ibmveth_driver = {
 +	.id_table	= ibmveth_device_table,
 +	.probe		= ibmveth_probe,
 +	.remove		= ibmveth_remove,
 +	.get_desired_dma = ibmveth_get_desired_dma,
 +	.driver		= {
 +		.name	= ibmveth_driver_name,
 +		.owner	= THIS_MODULE,
 +		.pm = &ibmveth_pm_ops,
 +	}
 +};
 +
 +static int __init ibmveth_module_init(void)
 +{
 +	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
 +	       ibmveth_driver_string, ibmveth_driver_version);
 +
 +	return vio_register_driver(&ibmveth_driver);
 +}
 +
 +static void __exit ibmveth_module_exit(void)
 +{
 +	vio_unregister_driver(&ibmveth_driver);
 +}
 +
 +module_init(ibmveth_module_init);
 +module_exit(ibmveth_module_exit);
diff --combined drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 5dc61b4,0000000..b89f3a6
mode 100644,000000..100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@@ -1,2606 -1,0 +1,2604 @@@
 +/*
 + * Copyright (C) 1999 - 2010 Intel Corporation.
 + * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
 + *
 + * This code was derived from the Intel e1000e Linux driver.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; version 2 of the License.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 + */
 +
 +#include "pch_gbe.h"
 +#include "pch_gbe_api.h"
 +
 +#define DRV_VERSION     "1.00"
 +const char pch_driver_version[] = DRV_VERSION;
 +
 +#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802		/* PCI device ID */
 +#define PCH_GBE_MAR_ENTRIES		16
 +#define PCH_GBE_SHORT_PKT		64
 +#define DSC_INIT16			0xC000
 +#define PCH_GBE_DMA_ALIGN		0
 +#define PCH_GBE_DMA_PADDING		2
 +#define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 +#define PCH_GBE_COPYBREAK_DEFAULT	256
 +#define PCH_GBE_PCI_BAR			1
 +#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */
 +
 +/* Macros for ML7223 */
 +#define PCI_VENDOR_ID_ROHM			0x10db
 +#define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013
 +
 +/* Macros for ML7831 */
 +#define PCI_DEVICE_ID_ROHM_ML7831_GBE		0x8802
 +
 +#define PCH_GBE_TX_WEIGHT         64
 +#define PCH_GBE_RX_WEIGHT         64
 +#define PCH_GBE_RX_BUFFER_WRITE   16
 +
 +/* Initialize the wake-on-LAN settings */
 +#define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
 +
 +#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
 +	PCH_GBE_CHIP_TYPE_INTERNAL | \
 +	PCH_GBE_RGMII_MODE_RGMII     \
 +	)
 +
 +/* Ethertype field values */
 +#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
 +#define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
 +#define PCH_GBE_FRAME_SIZE_2048         2048
 +#define PCH_GBE_FRAME_SIZE_4096         4096
 +#define PCH_GBE_FRAME_SIZE_8192         8192
 +
 +#define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
 +#define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
 +#define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
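 +/* number of unused descriptors; one slot is always left empty so a
 + * full ring can be told apart from an empty one
 + */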
 +#define PCH_GBE_DESC_UNUSED(R) \
 +	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 +	(R)->next_to_clean - (R)->next_to_use - 1)
 +
 +/* Pause packet value */
 +#define	PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
 +#define	PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
 +#define	PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
 +#define	PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
 +
 +#define PCH_GBE_ETH_ALEN            6
 +
 +/* This defines the bits that are set in the Interrupt Mask
 + * Set/Read Register.  Each bit is documented below:
 + *   o RX_DMA_CMPLT = Receive DMA Transfer Complete
 + *   o RX_DSC_EMP   = Receive Descriptor Empty
 + *   o RX_FIFO_ERR  = Receive FIFO Error
 + *   o WOL_DET      = Wake On LAN Detect
 + *   o TX_CMPLT     = Transmit Complete
 + */
 +#define PCH_GBE_INT_ENABLE_MASK ( \
 +	PCH_GBE_INT_RX_DMA_CMPLT |    \
 +	PCH_GBE_INT_RX_DSC_EMP   |    \
 +	PCH_GBE_INT_RX_FIFO_ERR  |    \
 +	PCH_GBE_INT_WOL_DET      |    \
 +	PCH_GBE_INT_TX_CMPLT          \
 +	)
 +
 +#define PCH_GBE_INT_DISABLE_ALL		0
 +
 +static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 +
 +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 +static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 +			       int data);
 +
 +inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 +{
 +	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
 +}
 +
 +/**
 + * pch_gbe_mac_read_mac_addr - Read MAC address
 + * @hw:	            Pointer to the HW structure
 + * Returns
 + *	0:			Successful.
 + */
 +s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 +{
 +	u32  adr1a, adr1b;
 +
 +	adr1a = ioread32(&hw->reg->mac_adr[0].high);
 +	adr1b = ioread32(&hw->reg->mac_adr[0].low);
 +
 +	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
 +	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
 +	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
 +	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
 +	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
 +	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
 +
 +	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_wait_clr_bit - Wait to clear a bit
 + * @reg:	Pointer to the register
 + * @bit:	Busy bit to wait for
 + */
 +static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 +{
 +	u32 tmp;
 +	/* wait busy */
 +	tmp = 1000;
 +	while ((ioread32(reg) & bit) && --tmp)
 +		cpu_relax();
 +	if (!tmp)
 +		pr_err("Error: busy bit is not cleared\n");
 +}
 +
 +/**
 + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
 + * @reg:	Pointer to the register
 + * @bit:	Busy bit to wait for
 + */
 +static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
 +{
 +	u32 tmp;
 +	int ret = -1;
 +	/* wait busy */
 +	tmp = 20;
 +	while ((ioread32(reg) & bit) && --tmp)
 +		udelay(5);
 +	if (!tmp)
 +		pr_err("Error: busy bit is not cleared\n");
 +	else
 +		ret = 0;
 +	return ret;
 +}
 +
 +/**
 + * pch_gbe_mac_mar_set - Set MAC address register
 + * @hw:	    Pointer to the HW structure
 + * @addr:   Pointer to the MAC address
 + * @index:  MAC address array register
 + */
 +static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
 +{
 +	u32 mar_low, mar_high, adrmask;
 +
 +	pr_debug("index : 0x%x\n", index);
 +
 +	/*
 +	 * HW expects these in little endian so we reverse the byte order
 +	 * from network order (big endian) to little endian
 +	 */
 +	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
 +		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
 +	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
 +	/* Stop the MAC Address of index. */
 +	adrmask = ioread32(&hw->reg->ADDR_MASK);
 +	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
 +	/* wait busy */
 +	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +	/* Set the MAC address to the MAC address 1A/1B register */
 +	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
 +	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
 +	/* Start the MAC address of index */
 +	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
 +	/* wait busy */
 +	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +}
 +
 +/**
 + * pch_gbe_mac_reset_hw - Reset hardware
 + * @hw:	Pointer to the HW structure
 + */
 +static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 +{
 +	/* Read the MAC address and store it in the private data */
 +	pch_gbe_mac_read_mac_addr(hw);
 +	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
 +#ifdef PCH_GBE_MAC_IFOP_RGMII
 +	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 +#endif
 +	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
 +	/* Setup the receive address */
 +	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 +	return;
 +}
 +
 +static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
 +{
 +	/* Read the MAC address and store it in the private data */
 +	pch_gbe_mac_read_mac_addr(hw);
 +	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
 +	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
 +	/* Setup the MAC address */
 +	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 +	return;
 +}
 +
 +/**
 + * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 + * @hw:	Pointer to the HW structure
 + * @mar_count: Receive address registers
 + */
 +static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
 +{
 +	u32 i;
 +
 +	/* Setup the receive address */
 +	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 +
 +	/* Zero out the other receive addresses */
 +	for (i = 1; i < mar_count; i++) {
 +		iowrite32(0, &hw->reg->mac_adr[i].high);
 +		iowrite32(0, &hw->reg->mac_adr[i].low);
 +	}
 +	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
 +	/* wait busy */
 +	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +}
 +
 +
 +/**
 + * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 + * @hw:	            Pointer to the HW structure
 + * @mc_addr_list:   Array of multicast addresses to program
 + * @mc_addr_count:  Number of multicast addresses to program
 + * @mar_used_count: The first MAC Address register free to program
 + * @mar_total_num:  Total number of supported MAC Address Registers
 + */
 +static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
 +					    u8 *mc_addr_list, u32 mc_addr_count,
 +					    u32 mar_used_count, u32 mar_total_num)
 +{
 +	u32 i, adrmask;
 +
 +	/* Load the first set of multicast addresses into the exact
 +	 * filters (RAR).  If there are not enough to fill the RAR
 +	 * array, clear the filters.
 +	 */
 +	for (i = mar_used_count; i < mar_total_num; i++) {
 +		if (mc_addr_count) {
 +			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
 +			mc_addr_count--;
 +			mc_addr_list += PCH_GBE_ETH_ALEN;
 +		} else {
 +			/* Clear MAC address mask */
 +			adrmask = ioread32(&hw->reg->ADDR_MASK);
 +			iowrite32((adrmask | (0x0001 << i)),
 +					&hw->reg->ADDR_MASK);
 +			/* wait busy */
 +			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +			/* Clear MAC address */
 +			iowrite32(0, &hw->reg->mac_adr[i].high);
 +			iowrite32(0, &hw->reg->mac_adr[i].low);
 +		}
 +	}
 +}
 +
 +/**
 + * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 + * @hw:	            Pointer to the HW structure
 + * Returns
 + *	0:			Successful.
 + *	Negative value:		Failed.
 + */
 +s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
 +{
 +	struct pch_gbe_mac_info *mac = &hw->mac;
 +	u32 rx_fctrl;
 +
 +	pr_debug("mac->fc = %u\n", mac->fc);
 +
 +	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
 +
 +	switch (mac->fc) {
 +	case PCH_GBE_FC_NONE:
 +		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +		mac->tx_fc_enable = false;
 +		break;
 +	case PCH_GBE_FC_RX_PAUSE:
 +		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 +		mac->tx_fc_enable = false;
 +		break;
 +	case PCH_GBE_FC_TX_PAUSE:
 +		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +		mac->tx_fc_enable = true;
 +		break;
 +	case PCH_GBE_FC_FULL:
 +		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 +		mac->tx_fc_enable = true;
 +		break;
 +	default:
 +		pr_err("Flow control param set incorrectly\n");
 +		return -EINVAL;
 +	}
 +	if (mac->link_duplex == DUPLEX_HALF)
 +		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
 +	pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
 +		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 + * @hw:     Pointer to the HW structure
 + * @wu_evt: Wake up event
 + */
 +static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
 +{
 +	u32 addr_mask;
 +
 +	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
 +		 wu_evt, ioread32(&hw->reg->ADDR_MASK));
 +
 +	if (wu_evt) {
 +		/* Set Wake-On-Lan address mask */
 +		addr_mask = ioread32(&hw->reg->ADDR_MASK);
 +		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
 +		/* wait busy */
 +		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
 +		iowrite32(0, &hw->reg->WOL_ST);
 +		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
 +		iowrite32(0x02, &hw->reg->TCPIP_ACC);
 +		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 +	} else {
 +		iowrite32(0, &hw->reg->WOL_CTRL);
 +		iowrite32(0, &hw->reg->WOL_ST);
 +	}
 +	return;
 +}
 +
 +/**
 + * pch_gbe_mac_ctrl_miim - Control MIIM interface
 + * @hw:   Pointer to the HW structure
 + * @addr: Address of PHY
 + * @dir:  Operation (Write or Read)
 + * @reg:  Access register of PHY
 + * @data: Write data.
 + *
 + * Returns: Read data.
 + */
 +u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
 +			u16 data)
 +{
 +	u32 data_out = 0;
 +	unsigned int i;
 +	unsigned long flags;
 +
 +	spin_lock_irqsave(&hw->miim_lock, flags);
 +
 +	for (i = 100; i; --i) {
 +		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
 +			break;
 +		udelay(20);
 +	}
 +	if (i == 0) {
 +		pr_err("pch-gbe.miim won't go Ready\n");
 +		spin_unlock_irqrestore(&hw->miim_lock, flags);
 +		return 0;	/* No way to indicate timeout error */
 +	}
 +	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
 +		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
 +		  dir | data), &hw->reg->MIIM);
 +	for (i = 0; i < 100; i++) {
 +		udelay(20);
 +		data_out = ioread32(&hw->reg->MIIM);
 +		if ((data_out & PCH_GBE_MIIM_OPER_READY))
 +			break;
 +	}
 +	spin_unlock_irqrestore(&hw->miim_lock, flags);
 +
 +	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
 +		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
 +		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
 +	return (u16) data_out;
 +}
 +
 +/**
 + * pch_gbe_mac_set_pause_packet - Set pause packet
 + * @hw:   Pointer to the HW structure
 + */
 +static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
 +{
 +	unsigned long tmp2, tmp3;
 +
 +	/* Set Pause packet */
 +	tmp2 = hw->mac.addr[1];
 +	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
 +	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
 +
 +	tmp3 = hw->mac.addr[5];
 +	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
 +	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
 +	tmp3 = (tmp3 << 8) | hw->mac.addr[2];
 +
 +	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
 +	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
 +	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
 +	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
 +	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
 +
 +	/* Transmit Pause Packet */
 +	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
 +
 +	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 +		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
 +		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
 +		 ioread32(&hw->reg->PAUSE_PKT5));
 +
 +	return;
 +}
 +
 +
 +/**
 + * pch_gbe_alloc_queues - Allocate memory for all rings
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *	0:	Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 +{
 +	int size;
 +
 +	size = (int)sizeof(struct pch_gbe_tx_ring);
 +	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
 +	if (!adapter->tx_ring)
 +		return -ENOMEM;
 +	size = (int)sizeof(struct pch_gbe_rx_ring);
 +	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
 +	if (!adapter->rx_ring) {
 +		kfree(adapter->tx_ring);
 +		return -ENOMEM;
 +	}
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_init_stats - Initialize statistics
 + * @adapter:  Board private structure to initialize
 + */
 +static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
 +{
 +	memset(&adapter->stats, 0, sizeof(adapter->stats));
 +	return;
 +}
 +
 +/**
 + * pch_gbe_init_phy - Initialize PHY
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *	0:	Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	u32 addr;
 +	u16 bmcr, stat;
 +
 +	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
 +	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 +		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
 +		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
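 +		/* BMSR latches link-down events, so read it twice; the
 +		 * second read reflects the current link state */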
 +		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 +		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 +		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
 +			break;
 +	}
 +	adapter->hw.phy.addr = adapter->mii.phy_id;
 +	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
 +	if (addr == 32)
 +		return -EAGAIN;
 +	/* Select the phy and isolate the rest */
 +	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 +		if (addr != adapter->mii.phy_id) {
 +			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 +					   BMCR_ISOLATE);
 +		} else {
 +			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
 +			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 +					   bmcr & ~BMCR_ISOLATE);
 +		}
 +	}
 +
 +	/* MII setup */
 +	adapter->mii.phy_id_mask = 0x1F;
 +	adapter->mii.reg_num_mask = 0x1F;
 +	adapter->mii.dev = adapter->netdev;
 +	adapter->mii.mdio_read = pch_gbe_mdio_read;
 +	adapter->mii.mdio_write = pch_gbe_mdio_write;
 +	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_mdio_read - The read function for mii
 + * @netdev: Network interface device structure
 + * @addr:   Phy ID
 + * @reg:    Access location
 + * Returns
 + *	0:	Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
 +				     (u16) 0);
 +}
 +
 +/**
 + * pch_gbe_mdio_write - The write function for mii
 + * @netdev: Network interface device structure
 + * @addr:   Phy ID (not used)
 + * @reg:    Access location
 + * @data:   Write data
 + */
 +static void pch_gbe_mdio_write(struct net_device *netdev,
 +			       int addr, int reg, int data)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
 +}
 +
 +/**
 + * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 + * @work:  Pointer of board private structure
 + */
 +static void pch_gbe_reset_task(struct work_struct *work)
 +{
 +	struct pch_gbe_adapter *adapter;
 +	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 +
 +	rtnl_lock();
 +	pch_gbe_reinit_locked(adapter);
 +	rtnl_unlock();
 +}
 +
 +/**
 + * pch_gbe_reinit_locked- Re-initialization
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 +{
 +	pch_gbe_down(adapter);
 +	pch_gbe_up(adapter);
 +}
 +
 +/**
 + * pch_gbe_reset - Reset GbE
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 +{
 +	pch_gbe_mac_reset_hw(&adapter->hw);
 +	/* Setup the receive address. */
 +	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
 +	if (pch_gbe_hal_init_hw(&adapter->hw))
 +		pr_err("Hardware Error\n");
 +}
 +
 +/**
 + * pch_gbe_free_irq - Free an interrupt
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +
 +	free_irq(adapter->pdev->irq, netdev);
 +	if (adapter->have_msi) {
 +		pci_disable_msi(adapter->pdev);
 +		pr_debug("call pci_disable_msi\n");
 +	}
 +}
 +
 +/**
 + * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	atomic_inc(&adapter->irq_sem);
 +	iowrite32(0, &hw->reg->INT_EN);
 +	ioread32(&hw->reg->INT_ST);
 +	synchronize_irq(adapter->pdev->irq);
 +
 +	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
 +}
 +
 +/**
 + * pch_gbe_irq_enable - Enable default interrupt generation settings
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
 +		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 +	ioread32(&hw->reg->INT_ST);
 +	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
 +}
 +
 +
 +
 +/**
 + * pch_gbe_setup_tctl - configure the Transmit control registers
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 tx_mode, tcpip;
 +
 +	tx_mode = PCH_GBE_TM_LONG_PKT |
 +		PCH_GBE_TM_ST_AND_FD |
 +		PCH_GBE_TM_SHORT_PKT |
 +		PCH_GBE_TM_TH_TX_STRT_8 |
 +		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
 +
 +	iowrite32(tx_mode, &hw->reg->TX_MODE);
 +
 +	tcpip = ioread32(&hw->reg->TCPIP_ACC);
 +	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
 +	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 +	return;
 +}
 +
 +/**
 + * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 tdba, tdlen, dctrl;
 +
 +	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
 +		 (unsigned long long)adapter->tx_ring->dma,
 +		 adapter->tx_ring->size);
 +
 +	/* Setup the HW Tx Head and Tail descriptor pointers */
 +	tdba = adapter->tx_ring->dma;
 +	tdlen = adapter->tx_ring->size - 0x10;
 +	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
 +	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
 +	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
 +
 +	/* Enables Transmission DMA */
 +	dctrl = ioread32(&hw->reg->DMA_CTRL);
 +	dctrl |= PCH_GBE_TX_DMA_EN;
 +	iowrite32(dctrl, &hw->reg->DMA_CTRL);
 +}
 +
 +/**
 + * pch_gbe_setup_rctl - Configure the receive control registers
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 rx_mode, tcpip;
 +
 +	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
 +	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
 +
 +	iowrite32(rx_mode, &hw->reg->RX_MODE);
 +
 +	tcpip = ioread32(&hw->reg->TCPIP_ACC);
 +
 +	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
 +	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
 +	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 +	return;
 +}
 +
 +/**
 + * pch_gbe_configure_rx - Configure Receive Unit after Reset
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 rdba, rdlen, rctl, rxdma;
 +
 +	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
 +		 (unsigned long long)adapter->rx_ring->dma,
 +		 adapter->rx_ring->size);
 +
 +	pch_gbe_mac_force_mac_fc(hw);
 +
 +	/* Disables Receive MAC */
 +	rctl = ioread32(&hw->reg->MAC_RX_EN);
 +	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 +
 +	/* Disables Receive DMA */
 +	rxdma = ioread32(&hw->reg->DMA_CTRL);
 +	rxdma &= ~PCH_GBE_RX_DMA_EN;
 +	iowrite32(rxdma, &hw->reg->DMA_CTRL);
 +
 +	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
 +		 ioread32(&hw->reg->MAC_RX_EN),
 +		 ioread32(&hw->reg->DMA_CTRL));
 +
 +	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 +	 * the Base and Length of the Rx Descriptor Ring */
 +	rdba = adapter->rx_ring->dma;
 +	rdlen = adapter->rx_ring->size - 0x10;
 +	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
 +	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
 +	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
 +}
 +
 +/**
 + * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 + * @adapter:     Board private structure
 + * @buffer_info: Buffer information structure
 + */
 +static void pch_gbe_unmap_and_free_tx_resource(
 +	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
 +{
 +	if (buffer_info->mapped) {
 +		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +				 buffer_info->length, DMA_TO_DEVICE);
 +		buffer_info->mapped = false;
 +	}
 +	if (buffer_info->skb) {
 +		dev_kfree_skb_any(buffer_info->skb);
 +		buffer_info->skb = NULL;
 +	}
 +}
 +
 +/**
 + * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 + * @adapter:      Board private structure
 + * @buffer_info:  Buffer information structure
 + */
 +static void pch_gbe_unmap_and_free_rx_resource(
 +					struct pch_gbe_adapter *adapter,
 +					struct pch_gbe_buffer *buffer_info)
 +{
 +	if (buffer_info->mapped) {
 +		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +				 buffer_info->length, DMA_FROM_DEVICE);
 +		buffer_info->mapped = false;
 +	}
 +	if (buffer_info->skb) {
 +		dev_kfree_skb_any(buffer_info->skb);
 +		buffer_info->skb = NULL;
 +	}
 +}
 +
 +/**
 + * pch_gbe_clean_tx_ring - Free Tx Buffers
 + * @adapter:  Board private structure
 + * @tx_ring:  Ring to be cleaned
 + */
 +static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
 +				   struct pch_gbe_tx_ring *tx_ring)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct pch_gbe_buffer *buffer_info;
 +	unsigned long size;
 +	unsigned int i;
 +
 +	/* Free all the Tx ring sk_buffs */
 +	for (i = 0; i < tx_ring->count; i++) {
 +		buffer_info = &tx_ring->buffer_info[i];
 +		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
 +	}
 +	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
 +
 +	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 +	memset(tx_ring->buffer_info, 0, size);
 +
 +	/* Zero out the descriptor ring */
 +	memset(tx_ring->desc, 0, tx_ring->size);
 +	tx_ring->next_to_use = 0;
 +	tx_ring->next_to_clean = 0;
 +	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
 +	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
 +}
 +
 +/**
 + * pch_gbe_clean_rx_ring - Free Rx Buffers
 + * @adapter:  Board private structure
 + * @rx_ring:  Ring to free buffers from
 + */
 +static void
 +pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
 +		      struct pch_gbe_rx_ring *rx_ring)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct pch_gbe_buffer *buffer_info;
 +	unsigned long size;
 +	unsigned int i;
 +
 +	/* Free all the Rx ring sk_buffs */
 +	for (i = 0; i < rx_ring->count; i++) {
 +		buffer_info = &rx_ring->buffer_info[i];
 +		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
 +	}
 +	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
 +	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 +	memset(rx_ring->buffer_info, 0, size);
 +
 +	/* Zero out the descriptor ring */
 +	memset(rx_ring->desc, 0, rx_ring->size);
 +	rx_ring->next_to_clean = 0;
 +	rx_ring->next_to_use = 0;
 +	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
 +	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
 +}
 +
 +static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
 +				    u16 duplex)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	unsigned long rgmii = 0;
 +
 +	/* Set the RGMII control. */
 +#ifdef PCH_GBE_MAC_IFOP_RGMII
 +	switch (speed) {
 +	case SPEED_10:
 +		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
 +			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +		break;
 +	case SPEED_100:
 +		rgmii = (PCH_GBE_RGMII_RATE_25M |
 +			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +		break;
 +	case SPEED_1000:
 +		rgmii = (PCH_GBE_RGMII_RATE_125M |
 +			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +		break;
 +	}
 +	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
 +#else	/* GMII */
 +	rgmii = 0;
 +	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
 +#endif
 +}
 +static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
 +			      u16 duplex)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	unsigned long mode = 0;
 +
 +	/* Set the communication mode */
 +	switch (speed) {
 +	case SPEED_10:
 +		mode = PCH_GBE_MODE_MII_ETHER;
 +		netdev->tx_queue_len = 10;
 +		break;
 +	case SPEED_100:
 +		mode = PCH_GBE_MODE_MII_ETHER;
 +		netdev->tx_queue_len = 100;
 +		break;
 +	case SPEED_1000:
 +		mode = PCH_GBE_MODE_GMII_ETHER;
 +		break;
 +	}
 +	if (duplex == DUPLEX_FULL)
 +		mode |= PCH_GBE_MODE_FULL_DUPLEX;
 +	else
 +		mode |= PCH_GBE_MODE_HALF_DUPLEX;
 +	iowrite32(mode, &hw->reg->MODE);
 +}
 +
 +/**
 + * pch_gbe_watchdog - Watchdog process
 + * @data:  Board private structure
 + */
 +static void pch_gbe_watchdog(unsigned long data)
 +{
 +	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
 +	struct net_device *netdev = adapter->netdev;
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	pr_debug("right now = %ld\n", jiffies);
 +
 +	pch_gbe_update_stats(adapter);
 +	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
 +		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 +		netdev->tx_queue_len = adapter->tx_queue_len;
 +		/* mii library handles link maintenance tasks */
 +		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
 +			pr_err("ethtool get setting Error\n");
 +			mod_timer(&adapter->watchdog_timer,
 +				  round_jiffies(jiffies +
 +						PCH_GBE_WATCHDOG_PERIOD));
 +			return;
 +		}
 +		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
 +		hw->mac.link_duplex = cmd.duplex;
 +		/* Set the RGMII control. */
 +		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
 +						hw->mac.link_duplex);
 +		/* Set the communication mode */
 +		pch_gbe_set_mode(adapter, hw->mac.link_speed,
 +				 hw->mac.link_duplex);
 +		netdev_dbg(netdev,
 +			   "Link is Up %d Mbps %s-Duplex\n",
 +			   hw->mac.link_speed,
 +			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
 +		netif_carrier_on(netdev);
 +		netif_wake_queue(netdev);
 +	} else if ((!mii_link_ok(&adapter->mii)) &&
 +		   (netif_carrier_ok(netdev))) {
 +		netdev_dbg(netdev, "NIC Link is Down\n");
 +		hw->mac.link_speed = SPEED_10;
 +		hw->mac.link_duplex = DUPLEX_HALF;
 +		netif_carrier_off(netdev);
 +		netif_stop_queue(netdev);
 +	}
 +	mod_timer(&adapter->watchdog_timer,
 +		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
 +}
 +
 +/**
 + * pch_gbe_tx_queue - Carry out queuing of the transmission data
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring structure
 + * @skb:      Socket buffer structure
 + */
 +static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
 +			      struct pch_gbe_tx_ring *tx_ring,
 +			      struct sk_buff *skb)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct pch_gbe_tx_desc *tx_desc;
 +	struct pch_gbe_buffer *buffer_info;
 +	struct sk_buff *tmp_skb;
 +	unsigned int frame_ctrl;
 +	unsigned int ring_num;
 +	unsigned long flags;
 +
 +	/*-- Set frame control --*/
 +	frame_ctrl = 0;
 +	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
 +		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
 +	if (skb->ip_summed == CHECKSUM_NONE)
 +		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
 +
 +	/*
 +	 * Perform checksum processing in software: the hardware
 +	 * accelerator cannot compute checksums for frames shorter
 +	 * than 64 bytes (PCH_GBE_SHORT_PKT).
 +	 */
 +	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
 +		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
 +			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
 +		if (skb->protocol == htons(ETH_P_IP)) {
 +			struct iphdr *iph = ip_hdr(skb);
 +			unsigned int offset;
 +			iph->check = 0;
 +			iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
 +			offset = skb_transport_offset(skb);
 +			if (iph->protocol == IPPROTO_TCP) {
 +				skb->csum = 0;
 +				tcp_hdr(skb)->check = 0;
 +				skb->csum = skb_checksum(skb, offset,
 +							 skb->len - offset, 0);
 +				tcp_hdr(skb)->check =
 +					csum_tcpudp_magic(iph->saddr,
 +							  iph->daddr,
 +							  skb->len - offset,
 +							  IPPROTO_TCP,
 +							  skb->csum);
 +			} else if (iph->protocol == IPPROTO_UDP) {
 +				skb->csum = 0;
 +				udp_hdr(skb)->check = 0;
 +				skb->csum =
 +					skb_checksum(skb, offset,
 +						     skb->len - offset, 0);
 +				udp_hdr(skb)->check =
 +					csum_tcpudp_magic(iph->saddr,
 +							  iph->daddr,
 +							  skb->len - offset,
 +							  IPPROTO_UDP,
 +							  skb->csum);
 +			}
 +		}
 +	}
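 +	/* Reserve the next descriptor slot under tx_lock, advancing
 +	 * next_to_use with wraparound */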
 +	spin_lock_irqsave(&tx_ring->tx_lock, flags);
 +	ring_num = tx_ring->next_to_use;
 +	if (unlikely((ring_num + 1) == tx_ring->count))
 +		tx_ring->next_to_use = 0;
 +	else
 +		tx_ring->next_to_use = ring_num + 1;
 +
 +	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +	buffer_info = &tx_ring->buffer_info[ring_num];
 +	tmp_skb = buffer_info->skb;
 +
 +	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
 +	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
 +	tmp_skb->data[ETH_HLEN] = 0x00;
 +	tmp_skb->data[ETH_HLEN + 1] = 0x00;
 +	tmp_skb->len = skb->len;
 +	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
 +	       (skb->len - ETH_HLEN));
 +	/*-- Set Buffer information --*/
 +	buffer_info->length = tmp_skb->len;
 +	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
 +					  buffer_info->length,
 +					  DMA_TO_DEVICE);
 +	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
 +		pr_err("TX DMA map failed\n");
 +		buffer_info->dma = 0;
 +		buffer_info->time_stamp = 0;
 +		tx_ring->next_to_use = ring_num;
 +		return;
 +	}
 +	buffer_info->mapped = true;
 +	buffer_info->time_stamp = jiffies;
 +
 +	/*-- Set Tx descriptor --*/
 +	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
 +	tx_desc->buffer_addr = (buffer_info->dma);
 +	tx_desc->length = (tmp_skb->len);
 +	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
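 +	/* tx_words_eob apparently encodes the length plus a 3-byte
 +	 * offset; pch_gbe_clean_rx() subtracts the same 3 from
 +	 * rx_words_eob on the receive side */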
 +	tx_desc->tx_frame_ctrl = (frame_ctrl);
 +	tx_desc->gbec_status = (DSC_INIT16);
 +
 +	if (unlikely(++ring_num == tx_ring->count))
 +		ring_num = 0;
 +
 +	/* Update software pointer of TX descriptor */
 +	iowrite32(tx_ring->dma +
 +		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
 +		  &hw->reg->TX_DSC_SW_P);
 +	dev_kfree_skb_any(skb);
 +}
 +
 +/**
 + * pch_gbe_update_stats - Update the board statistics counters
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_hw_stats *stats = &adapter->stats;
 +	unsigned long flags;
 +
 +	/*
 +	 * Prevent stats update while adapter is being reset, or if the pci
 +	 * connection is down.
 +	 */
 +	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
 +		return;
 +
 +	spin_lock_irqsave(&adapter->stats_lock, flags);
 +
 +	/* Update device status "adapter->stats" */
 +	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
 +	stats->tx_errors = stats->tx_length_errors +
 +	    stats->tx_aborted_errors +
 +	    stats->tx_carrier_errors + stats->tx_timeout_count;
 +
 +	/* Update network device status "adapter->net_stats" */
 +	netdev->stats.rx_packets = stats->rx_packets;
 +	netdev->stats.rx_bytes = stats->rx_bytes;
 +	netdev->stats.rx_dropped = stats->rx_dropped;
 +	netdev->stats.tx_packets = stats->tx_packets;
 +	netdev->stats.tx_bytes = stats->tx_bytes;
 +	netdev->stats.tx_dropped = stats->tx_dropped;
 +	/* Fill out the OS statistics structure */
 +	netdev->stats.multicast = stats->multicast;
 +	netdev->stats.collisions = stats->collisions;
 +	/* Rx Errors */
 +	netdev->stats.rx_errors = stats->rx_errors;
 +	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
 +	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
 +	/* Tx Errors */
 +	netdev->stats.tx_errors = stats->tx_errors;
 +	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
 +	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
 +
 +	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 +}
 +
 +static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 rxdma;
 +	u16 value;
 +	int ret;
 +
 +	/* Disable Receive DMA */
 +	rxdma = ioread32(&hw->reg->DMA_CTRL);
 +	rxdma &= ~PCH_GBE_RX_DMA_EN;
 +	iowrite32(rxdma, &hw->reg->DMA_CTRL);
 +	/* Wait until the Rx DMA bus is idle */
 +	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
 +	if (ret) {
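 +		/* The Rx DMA engine did not go idle: drop PCI bus
 +		 * mastering so in-flight DMA is quiesced while the
 +		 * receive MAC is reset, then restore it */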
 +		/* Disable Bus master */
 +		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
 +		value &= ~PCI_COMMAND_MASTER;
 +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
 +		/* Stop Receive */
 +		pch_gbe_mac_reset_rx(hw);
 +		/* Enable Bus master */
 +		value |= PCI_COMMAND_MASTER;
 +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
 +	} else {
 +		/* Stop Receive */
 +		pch_gbe_mac_reset_rx(hw);
 +	}
 +}
 +
 +static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
 +{
 +	u32 rxdma;
 +
 +	/* Enables Receive DMA */
 +	rxdma = ioread32(&hw->reg->DMA_CTRL);
 +	rxdma |= PCH_GBE_RX_DMA_EN;
 +	iowrite32(rxdma, &hw->reg->DMA_CTRL);
 +	/* Enables Receive */
 +	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 +	return;
 +}
 +
 +/**
 + * pch_gbe_intr - Interrupt Handler
 + * @irq:   Interrupt number
 + * @data:  Pointer to a network interface device structure
 + * Returns
 + *	- IRQ_HANDLED:	Our interrupt
 + *	- IRQ_NONE:	Not our interrupt
 + */
 +static irqreturn_t pch_gbe_intr(int irq, void *data)
 +{
 +	struct net_device *netdev = data;
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 int_st;
 +	u32 int_en;
 +
 +	/* Check request status */
 +	int_st = ioread32(&hw->reg->INT_ST);
 +	int_st = int_st & ioread32(&hw->reg->INT_EN);
 +	/* No enabled interrupt cause is pending */
 +	if (unlikely(!int_st))
 +		return IRQ_NONE;	/* Not our interrupt. End processing. */
 +	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
 +	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
 +		adapter->stats.intr_rx_frame_err_count++;
 +	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
 +		if (!adapter->rx_stop_flag) {
 +			adapter->stats.intr_rx_fifo_err_count++;
 +			pr_debug("Rx fifo over run\n");
 +			adapter->rx_stop_flag = true;
 +			int_en = ioread32(&hw->reg->INT_EN);
 +			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 +				  &hw->reg->INT_EN);
 +			pch_gbe_stop_receive(adapter);
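++			/* Pick up completions raised while the receiver
++			 * was being stopped */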
++			int_st |= ioread32(&hw->reg->INT_ST);
++			int_st = int_st & ioread32(&hw->reg->INT_EN);
 +		}
 +	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
 +		adapter->stats.intr_rx_dma_err_count++;
 +	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
 +		adapter->stats.intr_tx_fifo_err_count++;
 +	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
 +		adapter->stats.intr_tx_dma_err_count++;
 +	if (int_st & PCH_GBE_INT_TCPIP_ERR)
 +		adapter->stats.intr_tcpip_err_count++;
 +	/* When the Rx descriptor ring is empty */
 +	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
 +		adapter->stats.intr_rx_dsc_empty_count++;
 +		pr_debug("Rx descriptor is empty\n");
 +		int_en = ioread32(&hw->reg->INT_EN);
 +		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
 +		if (hw->mac.tx_fc_enable) {
 +			/* Set Pause packet */
 +			pch_gbe_mac_set_pause_packet(hw);
 +		}
- 		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
- 		    == 0) {
- 			return IRQ_HANDLED;
- 		}
 +	}
 +
 +	/* Rx/Tx completion (or a stopped receiver): schedule NAPI polling */
- 	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
++	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
++	    (adapter->rx_stop_flag == true)) {
 +		if (likely(napi_schedule_prep(&adapter->napi))) {
 +			/* Enable only Rx Descriptor empty */
 +			atomic_inc(&adapter->irq_sem);
 +			int_en = ioread32(&hw->reg->INT_EN);
 +			int_en &=
 +			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
 +			iowrite32(int_en, &hw->reg->INT_EN);
 +			/* Start polling for NAPI */
 +			__napi_schedule(&adapter->napi);
 +		}
 +	}
 +	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
 +		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
 +	return IRQ_HANDLED;
 +}
 +
 +/**
 + * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 + * @adapter:       Board private structure
 + * @rx_ring:       Rx descriptor ring
 + * @cleaned_count: Cleaned count
 + */
 +static void
 +pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
 +			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct pch_gbe_rx_desc *rx_desc;
 +	struct pch_gbe_buffer *buffer_info;
 +	struct sk_buff *skb;
 +	unsigned int i;
 +	unsigned int bufsz;
 +
 +	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 +	i = rx_ring->next_to_use;
 +
 +	while ((cleaned_count--)) {
 +		buffer_info = &rx_ring->buffer_info[i];
 +		skb = netdev_alloc_skb(netdev, bufsz);
 +		if (unlikely(!skb)) {
 +			/* Better luck next round */
 +			adapter->stats.rx_alloc_buff_failed++;
 +			break;
 +		}
 +		/* align */
 +		skb_reserve(skb, NET_IP_ALIGN);
 +		buffer_info->skb = skb;
 +
 +		buffer_info->dma = dma_map_single(&pdev->dev,
 +						  buffer_info->rx_buffer,
 +						  buffer_info->length,
 +						  DMA_FROM_DEVICE);
 +		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
 +			dev_kfree_skb(skb);
 +			buffer_info->skb = NULL;
 +			buffer_info->dma = 0;
 +			adapter->stats.rx_alloc_buff_failed++;
 +			break; /* while !buffer_info->skb */
 +		}
 +		buffer_info->mapped = true;
 +		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 +		rx_desc->buffer_addr = (buffer_info->dma);
 +		rx_desc->gbec_status = DSC_INIT16;
 +
 +		pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
 +			 i, (unsigned long long)buffer_info->dma,
 +			 buffer_info->length);
 +
 +		if (unlikely(++i == rx_ring->count))
 +			i = 0;
 +	}
 +	if (likely(rx_ring->next_to_use != i)) {
 +		rx_ring->next_to_use = i;
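 +		/* Step back one: the hardware pointer must reference
 +		 * the last initialized descriptor, and i already points
 +		 * at the next free slot */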
 +		if (unlikely(i-- == 0))
 +			i = (rx_ring->count - 1);
 +		iowrite32(rx_ring->dma +
 +			  (int)sizeof(struct pch_gbe_rx_desc) * i,
 +			  &hw->reg->RX_DSC_SW_P);
 +	}
 +	return;
 +}
 +
 +static int
 +pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 +			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
 +{
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_buffer *buffer_info;
 +	unsigned int i;
 +	unsigned int bufsz;
 +	unsigned int size;
 +
 +	bufsz = adapter->rx_buffer_len;
 +
 +	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 +	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
 +						&rx_ring->rx_buff_pool_logic,
 +						GFP_KERNEL);
 +	if (!rx_ring->rx_buff_pool) {
 +		pr_err("Unable to allocate memory for the receive poll buffer\n");
 +		return -ENOMEM;
 +	}
 +	memset(rx_ring->rx_buff_pool, 0, size);
 +	rx_ring->rx_buff_pool_size = size;
 +	for (i = 0; i < rx_ring->count; i++) {
 +		buffer_info = &rx_ring->buffer_info[i];
 +		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
 +		buffer_info->length = bufsz;
 +	}
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 + * @adapter:   Board private structure
 + * @tx_ring:   Tx descriptor ring
 + */
 +static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
 +					struct pch_gbe_tx_ring *tx_ring)
 +{
 +	struct pch_gbe_buffer *buffer_info;
 +	struct sk_buff *skb;
 +	unsigned int i;
 +	unsigned int bufsz;
 +	struct pch_gbe_tx_desc *tx_desc;
 +
 +	bufsz =
 +	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
 +
 +	for (i = 0; i < tx_ring->count; i++) {
 +		buffer_info = &tx_ring->buffer_info[i];
 +		skb = netdev_alloc_skb(adapter->netdev, bufsz);
 +		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
 +		buffer_info->skb = skb;
 +		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +		tx_desc->gbec_status = (DSC_INIT16);
 +	}
 +	return;
 +}
 +
 +/**
 + * pch_gbe_clean_tx - Reclaim resources after transmit completes
 + * @adapter:   Board private structure
 + * @tx_ring:   Tx descriptor ring
 + * Returns
 + *	true:  All completed descriptors were reclaimed
 + *	false: Cleaning stopped early at PCH_GBE_TX_WEIGHT
 + */
 +static bool
 +pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 +		 struct pch_gbe_tx_ring *tx_ring)
 +{
 +	struct pch_gbe_tx_desc *tx_desc;
 +	struct pch_gbe_buffer *buffer_info;
 +	struct sk_buff *skb;
 +	unsigned int i;
 +	unsigned int cleaned_count = 0;
- 	bool cleaned = false;
++	bool cleaned = true;
 +
 +	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 +
 +	i = tx_ring->next_to_clean;
 +	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
 +		 tx_desc->gbec_status, tx_desc->dma_status);
 +
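 +	/* The hardware overwrites DSC_INIT16 in gbec_status when a
 +	 * descriptor completes; stop at the first one still holding it */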
 +	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
 +		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
- 		cleaned = true;
 +		buffer_info = &tx_ring->buffer_info[i];
 +		skb = buffer_info->skb;
 +
 +		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
 +			adapter->stats.tx_aborted_errors++;
 +			pr_err("Transfer Abort Error\n");
 +		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
 +			  ) {
 +			adapter->stats.tx_carrier_errors++;
 +			pr_err("Transfer Carrier Sense Error\n");
 +		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
 +			  ) {
 +			adapter->stats.tx_aborted_errors++;
 +			pr_err("Transfer Collision Abort Error\n");
 +		} else if ((tx_desc->gbec_status &
 +			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
 +			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
 +			adapter->stats.collisions++;
 +			adapter->stats.tx_packets++;
 +			adapter->stats.tx_bytes += skb->len;
 +			pr_debug("Transfer Collision\n");
 +		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
 +			  ) {
 +			adapter->stats.tx_packets++;
 +			adapter->stats.tx_bytes += skb->len;
 +		}
 +		if (buffer_info->mapped) {
 +			pr_debug("unmap buffer_info->dma : %d\n", i);
 +			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +					 buffer_info->length, DMA_TO_DEVICE);
 +			buffer_info->mapped = false;
 +		}
 +		if (buffer_info->skb) {
 +			pr_debug("trim buffer_info->skb : %d\n", i);
 +			skb_trim(buffer_info->skb, 0);
 +		}
 +		tx_desc->gbec_status = DSC_INIT16;
 +		if (unlikely(++i == tx_ring->count))
 +			i = 0;
 +		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +
 +		/* Bound the work per call to avoid endless transmit cleanup */
- 		if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
++		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
++			cleaned = false;
 +			break;
++		}
 +	}
 +	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 +		 cleaned_count);
 +	/* Recover from running out of Tx resources in xmit_frame */
 +	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
 +		netif_wake_queue(adapter->netdev);
 +		adapter->stats.tx_restart_count++;
 +		pr_debug("Tx wake queue\n");
 +	}
 +	spin_lock(&adapter->tx_queue_lock);
 +	tx_ring->next_to_clean = i;
 +	spin_unlock(&adapter->tx_queue_lock);
 +	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 +	return cleaned;
 +}
 +
 +/**
 + * pch_gbe_clean_rx - Send received data up the network stack; legacy
 + * @adapter:     Board private structure
 + * @rx_ring:     Rx descriptor ring
 + * @work_done:   Completed count
 + * @work_to_do:  Request count
 + * Returns
 + *	true:  Cleaned the descriptor
 + *	false: Not cleaned the descriptor
 + */
 +static bool
 +pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 +		 struct pch_gbe_rx_ring *rx_ring,
 +		 int *work_done, int work_to_do)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_buffer *buffer_info;
 +	struct pch_gbe_rx_desc *rx_desc;
 +	u32 length;
 +	unsigned int i;
 +	unsigned int cleaned_count = 0;
 +	bool cleaned = false;
 +	struct sk_buff *skb;
 +	u8 dma_status;
 +	u16 gbec_status;
 +	u32 tcp_ip_status;
 +
 +	i = rx_ring->next_to_clean;
 +
 +	while (*work_done < work_to_do) {
 +		/* Check Rx descriptor status */
 +		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 +		if (rx_desc->gbec_status == DSC_INIT16)
 +			break;
 +		cleaned = true;
 +		cleaned_count++;
 +
 +		dma_status = rx_desc->dma_status;
 +		gbec_status = rx_desc->gbec_status;
 +		tcp_ip_status = rx_desc->tcp_ip_status;
 +		rx_desc->gbec_status = DSC_INIT16;
 +		buffer_info = &rx_ring->buffer_info[i];
 +		skb = buffer_info->skb;
 +		buffer_info->skb = NULL;
 +
 +		/* unmap dma */
 +		dma_unmap_single(&pdev->dev, buffer_info->dma,
 +				   buffer_info->length, DMA_FROM_DEVICE);
 +		buffer_info->mapped = false;
 +
 +		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
 +			 "TCP:0x%08x]  BufInf = 0x%p\n",
 +			 i, dma_status, gbec_status, tcp_ip_status,
 +			 buffer_info);
 +		/* Error check */
 +		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
 +			adapter->stats.rx_frame_errors++;
 +			pr_err("Receive Not Octal Error\n");
 +		} else if (unlikely(gbec_status &
 +				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
 +			adapter->stats.rx_frame_errors++;
 +			pr_err("Receive Nibble Error\n");
 +		} else if (unlikely(gbec_status &
 +				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
 +			adapter->stats.rx_crc_errors++;
 +			pr_err("Receive CRC Error\n");
 +		} else {
 +			/* Get the receive length: rx_words_eob carries
 +			 * the length plus a 3-byte offset and includes
 +			 * the FCS */
 +			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
 +			if (rx_desc->rx_words_eob & 0x02)
 +				length = length - 4;
 +			/*
 +			 * buffer_info->rx_buffer: [Header:14][payload]
 +			 * skb->data: [Reserve:2][Header:14][payload]
 +			 */
 +			memcpy(skb->data, buffer_info->rx_buffer, length);
 +
 +			/* update status of driver */
 +			adapter->stats.rx_bytes += length;
 +			adapter->stats.rx_packets++;
 +			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
 +				adapter->stats.multicast++;
 +			/* Write the skb meta data */
 +			skb_put(skb, length);
 +			skb->protocol = eth_type_trans(skb, netdev);
 +			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
 +				skb->ip_summed = CHECKSUM_NONE;
 +			else
 +				skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +			napi_gro_receive(&adapter->napi, skb);
 +			(*work_done)++;
 +			pr_debug("Receive skb->ip_summed: %d length: %d\n",
 +				 skb->ip_summed, length);
 +		}
 +		/* return some buffers to hardware, one at a time is too slow */
 +		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
 +			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
 +						 cleaned_count);
 +			cleaned_count = 0;
 +		}
 +		if (++i == rx_ring->count)
 +			i = 0;
 +	}
 +	rx_ring->next_to_clean = i;
 +	if (cleaned_count)
 +		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 +	return cleaned;
 +}
 +
 +/**
 + * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 +				struct pch_gbe_tx_ring *tx_ring)
 +{
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_tx_desc *tx_desc;
 +	int size;
 +	int desNo;
 +
 +	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 +	tx_ring->buffer_info = vzalloc(size);
 +	if (!tx_ring->buffer_info) {
 +		pr_err("Unable to allocate memory for the buffer information\n");
 +		return -ENOMEM;
 +	}
 +
 +	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 +
 +	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
 +					   &tx_ring->dma, GFP_KERNEL);
 +	if (!tx_ring->desc) {
 +		vfree(tx_ring->buffer_info);
 +		pr_err("Unable to allocate memory for the transmit descriptor ring\n");
 +		return -ENOMEM;
 +	}
 +	memset(tx_ring->desc, 0, tx_ring->size);
 +
 +	tx_ring->next_to_use = 0;
 +	tx_ring->next_to_clean = 0;
 +	spin_lock_init(&tx_ring->tx_lock);
 +
 +	for (desNo = 0; desNo < tx_ring->count; desNo++) {
 +		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
 +		tx_desc->gbec_status = DSC_INIT16;
 +	}
 +	pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
 +		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 +		 tx_ring->desc, (unsigned long long)tx_ring->dma,
 +		 tx_ring->next_to_clean, tx_ring->next_to_use);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 + * @adapter:  Board private structure
 + * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 +				struct pch_gbe_rx_ring *rx_ring)
 +{
 +	struct pci_dev *pdev = adapter->pdev;
 +	struct pch_gbe_rx_desc *rx_desc;
 +	int size;
 +	int desNo;
 +
 +	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 +	rx_ring->buffer_info = vzalloc(size);
 +	if (!rx_ring->buffer_info) {
 +		pr_err("Unable to allocate memory for the receive descriptor ring\n");
 +		return -ENOMEM;
 +	}
 +	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 +	rx_ring->desc =	dma_alloc_coherent(&pdev->dev, rx_ring->size,
 +					   &rx_ring->dma, GFP_KERNEL);
 +
 +	if (!rx_ring->desc) {
 +		pr_err("Unable to allocate memory for the receive descriptor ring\n");
 +		vfree(rx_ring->buffer_info);
 +		return -ENOMEM;
 +	}
 +	memset(rx_ring->desc, 0, rx_ring->size);
 +	rx_ring->next_to_clean = 0;
 +	rx_ring->next_to_use = 0;
 +	for (desNo = 0; desNo < rx_ring->count; desNo++) {
 +		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
 +		rx_desc->gbec_status = DSC_INIT16;
 +	}
 +	pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
 +		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 +		 rx_ring->desc, (unsigned long long)rx_ring->dma,
 +		 rx_ring->next_to_clean, rx_ring->next_to_use);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_free_tx_resources - Free Tx Resources
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring for a specific queue
 + */
 +void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
 +				struct pch_gbe_tx_ring *tx_ring)
 +{
 +	struct pci_dev *pdev = adapter->pdev;
 +
 +	pch_gbe_clean_tx_ring(adapter, tx_ring);
 +	vfree(tx_ring->buffer_info);
 +	tx_ring->buffer_info = NULL;
 +	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
 +	tx_ring->desc = NULL;
 +}
 +
 +/**
 + * pch_gbe_free_rx_resources - Free Rx Resources
 + * @adapter:  Board private structure
 + * @rx_ring:  Ring to clean the resources from
 + */
 +void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
 +				struct pch_gbe_rx_ring *rx_ring)
 +{
 +	struct pci_dev *pdev = adapter->pdev;
 +
 +	pch_gbe_clean_rx_ring(adapter, rx_ring);
 +	vfree(rx_ring->buffer_info);
 +	rx_ring->buffer_info = NULL;
 +	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 +	rx_ring->desc = NULL;
 +}
 +
 +/**
 + * pch_gbe_request_irq - Allocate an interrupt line
 + * @adapter:  Board private structure
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	int err;
 +	int flags;
 +
 +	flags = IRQF_SHARED;
 +	adapter->have_msi = false;
 +	err = pci_enable_msi(adapter->pdev);
 +	pr_debug("call pci_enable_msi\n");
 +	if (err) {
 +		pr_debug("call pci_enable_msi - Error: %d\n", err);
 +	} else {
 +		flags = 0;
 +		adapter->have_msi = true;
 +	}
 +	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
 +			  flags, netdev->name, netdev);
 +	if (err)
 +		pr_err("Unable to allocate interrupt Error: %d\n", err);
 +	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
 +		 adapter->have_msi, flags, err);
 +	return err;
 +}
 +
 +
 +static void pch_gbe_set_multi(struct net_device *netdev);
 +/**
 + * pch_gbe_up - Up GbE network device
 + * @adapter:  Board private structure
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +int pch_gbe_up(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 +	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 +	int err;
 +
 +	/* hardware has been reset, we need to reload some things */
 +	pch_gbe_set_multi(netdev);
 +
 +	pch_gbe_setup_tctl(adapter);
 +	pch_gbe_configure_tx(adapter);
 +	pch_gbe_setup_rctl(adapter);
 +	pch_gbe_configure_rx(adapter);
 +
 +	err = pch_gbe_request_irq(adapter);
 +	if (err) {
 +		pr_err("Error: can't bring device up\n");
 +		return err;
 +	}
 +	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 +	if (err) {
 +		pr_err("Error: can't bring device up\n");
 +		return err;
 +	}
 +	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 +	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 +	adapter->tx_queue_len = netdev->tx_queue_len;
 +	pch_gbe_start_receive(&adapter->hw);
 +
 +	mod_timer(&adapter->watchdog_timer, jiffies);
 +
 +	napi_enable(&adapter->napi);
 +	pch_gbe_irq_enable(adapter);
 +	netif_start_queue(adapter->netdev);
 +
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_down - Down GbE network device
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_down(struct pch_gbe_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 +
 +	/* signal that we're down so the interrupt handler does not
 +	 * reschedule our watchdog timer */
 +	napi_disable(&adapter->napi);
 +	atomic_set(&adapter->irq_sem, 0);
 +
 +	pch_gbe_irq_disable(adapter);
 +	pch_gbe_free_irq(adapter);
 +
 +	del_timer_sync(&adapter->watchdog_timer);
 +
 +	netdev->tx_queue_len = adapter->tx_queue_len;
 +	netif_carrier_off(netdev);
 +	netif_stop_queue(netdev);
 +
 +	pch_gbe_reset(adapter);
 +	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
 +	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
 +
 +	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
 +			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
 +	rx_ring->rx_buff_pool_logic = 0;
 +	rx_ring->rx_buff_pool_size = 0;
 +	rx_ring->rx_buff_pool = NULL;
 +}
 +
 +/**
 + * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
 +{
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct net_device *netdev = adapter->netdev;
 +
 +	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 +	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 +	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 +
 +	/* Initialize the hardware-specific values */
 +	if (pch_gbe_hal_setup_init_funcs(hw)) {
 +		pr_err("Hardware Initialization Failure\n");
 +		return -EIO;
 +	}
 +	if (pch_gbe_alloc_queues(adapter)) {
 +		pr_err("Unable to allocate memory for queues\n");
 +		return -ENOMEM;
 +	}
 +	spin_lock_init(&adapter->hw.miim_lock);
 +	spin_lock_init(&adapter->tx_queue_lock);
 +	spin_lock_init(&adapter->stats_lock);
 +	spin_lock_init(&adapter->ethtool_lock);
 +	atomic_set(&adapter->irq_sem, 0);
 +	pch_gbe_irq_disable(adapter);
 +
 +	pch_gbe_init_stats(adapter);
 +
 +	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
 +		 (u32) adapter->rx_buffer_len,
 +		 hw->mac.min_frame_size, hw->mac.max_frame_size);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_open - Called when a network interface is made active
 + * @netdev:	Network interface device structure
 + * Returns
 + *	0:		Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_open(struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	int err;
 +
 +	/* allocate transmit descriptors */
 +	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
 +	if (err)
 +		goto err_setup_tx;
 +	/* allocate receive descriptors */
 +	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
 +	if (err)
 +		goto err_setup_rx;
 +	pch_gbe_hal_power_up_phy(hw);
 +	err = pch_gbe_up(adapter);
 +	if (err)
 +		goto err_up;
 +	pr_debug("Success End\n");
 +	return 0;
 +
 +err_up:
 +	if (!adapter->wake_up_evt)
 +		pch_gbe_hal_power_down_phy(hw);
 +	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 +err_setup_rx:
 +	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 +err_setup_tx:
 +	pch_gbe_reset(adapter);
 +	pr_err("Error End\n");
 +	return err;
 +}
 +
 +/**
 + * pch_gbe_stop - Disables a network interface
 + * @netdev:  Network interface device structure
 + * Returns
 + *	0: Successfully
 + */
 +static int pch_gbe_stop(struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	pch_gbe_down(adapter);
 +	if (!adapter->wake_up_evt)
 +		pch_gbe_hal_power_down_phy(hw);
 +	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 +	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_xmit_frame - Packet transmitting start
 + * @skb:     Socket buffer structure
 + * @netdev:  Network interface device structure
 + * Returns
 + *	- NETDEV_TX_OK:   Normal end
 + *	- NETDEV_TX_BUSY: Error end
 + */
 +static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 +	unsigned long flags;
 +
 +	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
 +		pr_err("Transfer length Error: skb len: %d > max: %d\n",
 +		       skb->len, adapter->hw.mac.max_frame_size);
 +		dev_kfree_skb_any(skb);
 +		adapter->stats.tx_length_errors++;
 +		return NETDEV_TX_OK;
 +	}
 +	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
 +		/* Collision - tell upper layer to requeue */
 +		return NETDEV_TX_LOCKED;
 +	}
 +	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 +		netif_stop_queue(netdev);
 +		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
 +			 tx_ring->next_to_use, tx_ring->next_to_clean);
 +		return NETDEV_TX_BUSY;
 +	}
 +	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +
 +	/* CRC and ITAG insertion are not supported */
 +	pch_gbe_tx_queue(adapter, tx_ring, skb);
 +	return NETDEV_TX_OK;
 +}
 +
 +/**
 + * pch_gbe_get_stats - Get System Network Statistics
 + * @netdev:  Network interface device structure
 + * Returns:  The current stats
 + */
 +static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
 +{
 +	/* only return the current stats */
 +	return &netdev->stats;
 +}
 +
 +/**
 + * pch_gbe_set_multi - Multicast and Promiscuous mode set
 + * @netdev:   Network interface device structure
 + */
 +static void pch_gbe_set_multi(struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	struct netdev_hw_addr *ha;
 +	u8 *mta_list;
 +	u32 rctl;
 +	int i;
 +	int mc_count;
 +
 +	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
 +
 +	/* Check for Promiscuous and All Multicast modes */
 +	rctl = ioread32(&hw->reg->RX_MODE);
 +	mc_count = netdev_mc_count(netdev);
 +	if ((netdev->flags & IFF_PROMISC)) {
 +		rctl &= ~PCH_GBE_ADD_FIL_EN;
 +		rctl &= ~PCH_GBE_MLT_FIL_EN;
 +	} else if ((netdev->flags & IFF_ALLMULTI)) {
 +		/* accept all multicast frames */
 +		rctl |= PCH_GBE_ADD_FIL_EN;
 +		rctl &= ~PCH_GBE_MLT_FIL_EN;
 +	} else {
 +		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
 +			/* accept all multicast frames */
 +			rctl |= PCH_GBE_ADD_FIL_EN;
 +			rctl &= ~PCH_GBE_MLT_FIL_EN;
 +		} else {
 +			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
 +		}
 +	}
 +	iowrite32(rctl, &hw->reg->RX_MODE);
 +
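 +	/* With more groups than exact-match MAR slots the filter was
 +	 * already opened to all multicast above, so there is nothing
 +	 * left to program */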
 +	if (mc_count >= PCH_GBE_MAR_ENTRIES)
 +		return;
 +	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
 +	if (!mta_list)
 +		return;
 +
 +	/* The shared function expects a packed array of only addresses. */
 +	i = 0;
 +	netdev_for_each_mc_addr(ha, netdev) {
 +		if (i == mc_count)
 +			break;
 +		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
 +	}
 +	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
 +					PCH_GBE_MAR_ENTRIES);
 +	kfree(mta_list);
 +
 +	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
 +		 ioread32(&hw->reg->RX_MODE), mc_count);
 +}
 +
 +/**
 + * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 + * @netdev: Network interface device structure
 + * @addr:   Pointer to an address structure
 + * Returns
 + *	0:		Successfully
 + *	-EADDRNOTAVAIL:	Failed
 + */
 +static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct sockaddr *skaddr = addr;
 +	int ret_val;
 +
 +	if (!is_valid_ether_addr(skaddr->sa_data)) {
 +		ret_val = -EADDRNOTAVAIL;
 +	} else {
 +		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
 +		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
 +		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 +		ret_val = 0;
 +	}
 +	pr_debug("ret_val : 0x%08x\n", ret_val);
 +	pr_debug("dev_addr : %pM\n", netdev->dev_addr);
 +	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
 +	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
 +		 ioread32(&adapter->hw.reg->mac_adr[0].high),
 +		 ioread32(&adapter->hw.reg->mac_adr[0].low));
 +	return ret_val;
 +}
 +
 +/**
 + * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 + * @netdev:   Network interface device structure
 + * @new_mtu:  New value for maximum frame size
 + * Returns
 + *	0:		Successfully
 + *	-EINVAL:	Failed
 + */
 +static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	int max_frame;
 +	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
 +	int err;
 +
 +	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 +	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 +		(max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
 +		pr_err("Invalid MTU setting\n");
 +		return -EINVAL;
 +	}
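 +	/* Round the receive buffer length up to the next supported size */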
 +	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
 +		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 +	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
 +		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
 +	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
 +		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
 +	else
 +		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 +
 +	if (netif_running(netdev)) {
 +		pch_gbe_down(adapter);
 +		err = pch_gbe_up(adapter);
 +		if (err) {
 +			adapter->rx_buffer_len = old_rx_buffer_len;
 +			pch_gbe_up(adapter);
 +			return -ENOMEM;
 +		} else {
 +			netdev->mtu = new_mtu;
 +			adapter->hw.mac.max_frame_size = max_frame;
 +		}
 +	} else {
 +		pch_gbe_reset(adapter);
 +		netdev->mtu = new_mtu;
 +		adapter->hw.mac.max_frame_size = max_frame;
 +	}
 +
 +	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
 +		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
 +		 adapter->hw.mac.max_frame_size);
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_set_features - Reset device after features changed
 + * @netdev:   Network interface device structure
 + * @features:  New features
 + * Returns
 + *	0:		HW state updated successfully
 + */
 +static int pch_gbe_set_features(struct net_device *netdev, u32 features)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	u32 changed = features ^ netdev->features;
 +
 +	if (!(changed & NETIF_F_RXCSUM))
 +		return 0;
 +
 +	if (netif_running(netdev))
 +		pch_gbe_reinit_locked(adapter);
 +	else
 +		pch_gbe_reset(adapter);
 +
 +	return 0;
 +}
 +
 +/**
 + * pch_gbe_ioctl - Controls register through a MII interface
 + * @netdev:   Network interface device structure
 + * @ifr:      Pointer to ifr structure
 + * @cmd:      Control command
 + * Returns
 + *	0:	Successfully
 + *	Negative value:	Failed
 + */
 +static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	pr_debug("cmd : 0x%04x\n", cmd);
 +
 +	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
 +}
 +
 +/**
 + * pch_gbe_tx_timeout - Respond to a Tx Hang
 + * @netdev:   Network interface device structure
 + */
 +static void pch_gbe_tx_timeout(struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	/* Do the reset outside of interrupt context */
 +	adapter->stats.tx_timeout_count++;
 +	schedule_work(&adapter->reset_task);
 +}
 +
 +/**
 + * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 + * @napi:    Pointer of polling device struct
 + * @budget:  The maximum number of packets to process
 + * Returns
 + *	false:  Exit the polling mode
 + *	true:   Continue the polling mode
 + */
 +static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 +{
 +	struct pch_gbe_adapter *adapter =
 +	    container_of(napi, struct pch_gbe_adapter, napi);
- 	struct net_device *netdev = adapter->netdev;
 +	int work_done = 0;
 +	bool poll_end_flag = false;
 +	bool cleaned = false;
 +	u32 int_en;
 +
 +	pr_debug("budget : %d\n", budget);
 +
- 	/* Keep link state information with original netdev */
- 	if (!netif_carrier_ok(netdev)) {
++	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
++	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
++
++	if (!cleaned)
++		work_done = budget;
++	/* If no Tx and not enough Rx work done,
++	 * exit the polling mode
++	 */
++	if (work_done < budget)
 +		poll_end_flag = true;
- 	} else {
- 		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
++
++	if (poll_end_flag) {
++		napi_complete(napi);
++		if (adapter->rx_stop_flag) {
++			adapter->rx_stop_flag = false;
++			pch_gbe_start_receive(&adapter->hw);
++		}
++		pch_gbe_irq_enable(adapter);
++	} else
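 +		/* Still polling: restart the receiver and re-enable the
 +		 * Rx FIFO error interrupt */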
 +		if (adapter->rx_stop_flag) {
 +			adapter->rx_stop_flag = false;
 +			pch_gbe_start_receive(&adapter->hw);
 +			int_en = ioread32(&adapter->hw.reg->INT_EN);
 +			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
- 					&adapter->hw.reg->INT_EN);
++				&adapter->hw.reg->INT_EN);
 +		}
- 		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- 
- 		if (cleaned)
- 			work_done = budget;
- 		/* If no Tx and not enough Rx work done,
- 		 * exit the polling mode
- 		 */
- 		if ((work_done < budget) || !netif_running(netdev))
- 			poll_end_flag = true;
- 	}
- 
- 	if (poll_end_flag) {
- 		napi_complete(napi);
- 		pch_gbe_irq_enable(adapter);
- 	}
 +
 +	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
 +		 poll_end_flag, work_done, budget);
 +
 +	return work_done;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +/**
 + * pch_gbe_netpoll - Used by things like netconsole to send skbs
 + * @netdev:  Network interface device structure
 + */
 +static void pch_gbe_netpoll(struct net_device *netdev)
 +{
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	disable_irq(adapter->pdev->irq);
 +	pch_gbe_intr(adapter->pdev->irq, netdev);
 +	enable_irq(adapter->pdev->irq);
 +}
 +#endif
 +
 +static const struct net_device_ops pch_gbe_netdev_ops = {
 +	.ndo_open = pch_gbe_open,
 +	.ndo_stop = pch_gbe_stop,
 +	.ndo_start_xmit = pch_gbe_xmit_frame,
 +	.ndo_get_stats = pch_gbe_get_stats,
 +	.ndo_set_mac_address = pch_gbe_set_mac,
 +	.ndo_tx_timeout = pch_gbe_tx_timeout,
 +	.ndo_change_mtu = pch_gbe_change_mtu,
 +	.ndo_set_features = pch_gbe_set_features,
 +	.ndo_do_ioctl = pch_gbe_ioctl,
 +	.ndo_set_rx_mode = pch_gbe_set_multi,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +	.ndo_poll_controller = pch_gbe_netpoll,
 +#endif
 +};
 +
 +static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
 +						pci_channel_state_t state)
 +{
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	netif_device_detach(netdev);
 +	if (netif_running(netdev))
 +		pch_gbe_down(adapter);
 +	pci_disable_device(pdev);
 +	/* Request a slot reset. */
 +	return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
 +{
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +
 +	if (pci_enable_device(pdev)) {
 +		pr_err("Cannot re-enable PCI device after reset\n");
 +		return PCI_ERS_RESULT_DISCONNECT;
 +	}
 +	pci_set_master(pdev);
 +	pci_enable_wake(pdev, PCI_D0, 0);
 +	pch_gbe_hal_power_up_phy(hw);
 +	pch_gbe_reset(adapter);
 +	/* Clear wake up status */
 +	pch_gbe_mac_set_wol_event(hw, 0);
 +
 +	return PCI_ERS_RESULT_RECOVERED;
 +}
 +
 +static void pch_gbe_io_resume(struct pci_dev *pdev)
 +{
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	if (netif_running(netdev)) {
 +		if (pch_gbe_up(adapter)) {
 +			pr_debug("can't bring device back up after reset\n");
 +			return;
 +		}
 +	}
 +	netif_device_attach(netdev);
 +}
 +
 +static int __pch_gbe_suspend(struct pci_dev *pdev)
 +{
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 wufc = adapter->wake_up_evt;
 +	int retval = 0;
 +
 +	netif_device_detach(netdev);
 +	if (netif_running(netdev))
 +		pch_gbe_down(adapter);
 +	if (wufc) {
 +		pch_gbe_set_multi(netdev);
 +		pch_gbe_setup_rctl(adapter);
 +		pch_gbe_configure_rx(adapter);
 +		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
 +					hw->mac.link_duplex);
 +		pch_gbe_set_mode(adapter, hw->mac.link_speed,
 +					hw->mac.link_duplex);
 +		pch_gbe_mac_set_wol_event(hw, wufc);
 +		pci_disable_device(pdev);
 +	} else {
 +		pch_gbe_hal_power_down_phy(hw);
 +		pch_gbe_mac_set_wol_event(hw, wufc);
 +		pci_disable_device(pdev);
 +	}
 +	return retval;
 +}
 +
 +#ifdef CONFIG_PM
 +static int pch_gbe_suspend(struct device *device)
 +{
 +	struct pci_dev *pdev = to_pci_dev(device);
 +
 +	return __pch_gbe_suspend(pdev);
 +}
 +
 +static int pch_gbe_resume(struct device *device)
 +{
 +	struct pci_dev *pdev = to_pci_dev(device);
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +	struct pch_gbe_hw *hw = &adapter->hw;
 +	u32 err;
 +
 +	err = pci_enable_device(pdev);
 +	if (err) {
 +		pr_err("Cannot enable PCI device from suspend\n");
 +		return err;
 +	}
 +	pci_set_master(pdev);
 +	pch_gbe_hal_power_up_phy(hw);
 +	pch_gbe_reset(adapter);
 +	/* Clear wake on lan control and status */
 +	pch_gbe_mac_set_wol_event(hw, 0);
 +
 +	if (netif_running(netdev))
 +		pch_gbe_up(adapter);
 +	netif_device_attach(netdev);
 +
 +	return 0;
 +}
 +#endif /* CONFIG_PM */
 +
 +static void pch_gbe_shutdown(struct pci_dev *pdev)
 +{
 +	__pch_gbe_suspend(pdev);
 +	if (system_state == SYSTEM_POWER_OFF) {
 +		pci_wake_from_d3(pdev, true);
 +		pci_set_power_state(pdev, PCI_D3hot);
 +	}
 +}
 +
 +static void pch_gbe_remove(struct pci_dev *pdev)
 +{
 +	struct net_device *netdev = pci_get_drvdata(pdev);
 +	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +	cancel_work_sync(&adapter->reset_task);
 +	unregister_netdev(netdev);
 +
 +	pch_gbe_hal_phy_hw_reset(&adapter->hw);
 +
 +	kfree(adapter->tx_ring);
 +	kfree(adapter->rx_ring);
 +
 +	iounmap(adapter->hw.reg);
 +	pci_release_regions(pdev);
 +	free_netdev(netdev);
 +	pci_disable_device(pdev);
 +}
 +
 +static int pch_gbe_probe(struct pci_dev *pdev,
 +			  const struct pci_device_id *pci_id)
 +{
 +	struct net_device *netdev;
 +	struct pch_gbe_adapter *adapter;
 +	int ret;
 +
 +	ret = pci_enable_device(pdev);
 +	if (ret)
 +		return ret;
 +
 +	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
 +		|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 +		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +		if (ret) {
 +			ret = pci_set_consistent_dma_mask(pdev,
 +							  DMA_BIT_MASK(32));
 +			if (ret) {
 +				dev_err(&pdev->dev, "ERR: No usable DMA "
 +					"configuration, aborting\n");
 +				goto err_disable_device;
 +			}
 +		}
 +	}
 +
 +	ret = pci_request_regions(pdev, KBUILD_MODNAME);
 +	if (ret) {
 +		dev_err(&pdev->dev,
 +			"ERR: Can't reserve PCI I/O and memory resources\n");
 +		goto err_disable_device;
 +	}
 +	pci_set_master(pdev);
 +
 +	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
 +	if (!netdev) {
 +		ret = -ENOMEM;
 +		dev_err(&pdev->dev,
 +			"ERR: Can't allocate and set up an Ethernet device\n");
 +		goto err_release_pci;
 +	}
 +	SET_NETDEV_DEV(netdev, &pdev->dev);
 +
 +	pci_set_drvdata(pdev, netdev);
 +	adapter = netdev_priv(netdev);
 +	adapter->netdev = netdev;
 +	adapter->pdev = pdev;
 +	adapter->hw.back = adapter;
 +	adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
 +	if (!adapter->hw.reg) {
 +		ret = -EIO;
 +		dev_err(&pdev->dev, "Can't ioremap\n");
 +		goto err_free_netdev;
 +	}
 +
 +	netdev->netdev_ops = &pch_gbe_netdev_ops;
 +	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
 +	netif_napi_add(netdev, &adapter->napi,
 +		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
 +	netdev->hw_features = NETIF_F_RXCSUM |
 +		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 +	netdev->features = netdev->hw_features;
 +	pch_gbe_set_ethtool_ops(netdev);
 +
 +	pch_gbe_mac_load_mac_addr(&adapter->hw);
 +	pch_gbe_mac_reset_hw(&adapter->hw);
 +
 +	/* setup the private structure */
 +	ret = pch_gbe_sw_init(adapter);
 +	if (ret)
 +		goto err_iounmap;
 +
 +	/* Initialize PHY */
 +	ret = pch_gbe_init_phy(adapter);
 +	if (ret) {
 +		dev_err(&pdev->dev, "PHY initialize error\n");
 +		goto err_free_adapter;
 +	}
 +	pch_gbe_hal_get_bus_info(&adapter->hw);
 +
 +	/* Read the MAC address and store it in the private data */
 +	ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
 +	if (ret) {
 +		dev_err(&pdev->dev, "MAC address Read Error\n");
 +		goto err_free_adapter;
 +	}
 +
 +	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
 +	if (!is_valid_ether_addr(netdev->dev_addr)) {
 +		dev_err(&pdev->dev, "Invalid MAC Address\n");
 +		ret = -EIO;
 +		goto err_free_adapter;
 +	}
 +	setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
 +		    (unsigned long)adapter);
 +
 +	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
 +
 +	pch_gbe_check_options(adapter);
 +
 +	/* initialize the wol settings based on the eeprom settings */
 +	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
 +	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
 +
 +	/* reset the hardware with the new settings */
 +	pch_gbe_reset(adapter);
 +
 +	ret = register_netdev(netdev);
 +	if (ret)
 +		goto err_free_adapter;
 +	/* tell the stack to leave us alone until pch_gbe_open() is called */
 +	netif_carrier_off(netdev);
 +	netif_stop_queue(netdev);
 +
 +	dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
 +
 +	device_set_wakeup_enable(&pdev->dev, 1);
 +	return 0;
 +
 +err_free_adapter:
 +	pch_gbe_hal_phy_hw_reset(&adapter->hw);
 +	kfree(adapter->tx_ring);
 +	kfree(adapter->rx_ring);
 +err_iounmap:
 +	iounmap(adapter->hw.reg);
 +err_free_netdev:
 +	free_netdev(netdev);
 +err_release_pci:
 +	pci_release_regions(pdev);
 +err_disable_device:
 +	pci_disable_device(pdev);
 +	return ret;
 +}
 +
 +static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
 +	{.vendor = PCI_VENDOR_ID_INTEL,
 +	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
 +	 .subvendor = PCI_ANY_ID,
 +	 .subdevice = PCI_ANY_ID,
 +	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 +	 .class_mask = (0xFFFF00)
 +	 },
 +	{.vendor = PCI_VENDOR_ID_ROHM,
 +	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
 +	 .subvendor = PCI_ANY_ID,
 +	 .subdevice = PCI_ANY_ID,
 +	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 +	 .class_mask = (0xFFFF00)
 +	 },
 +	{.vendor = PCI_VENDOR_ID_ROHM,
 +	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
 +	 .subvendor = PCI_ANY_ID,
 +	 .subdevice = PCI_ANY_ID,
 +	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 +	 .class_mask = (0xFFFF00)
 +	 },
 +	/* required last entry */
 +	{0}
 +};
 +
 +#ifdef CONFIG_PM
 +static const struct dev_pm_ops pch_gbe_pm_ops = {
 +	.suspend = pch_gbe_suspend,
 +	.resume = pch_gbe_resume,
 +	.freeze = pch_gbe_suspend,
 +	.thaw = pch_gbe_resume,
 +	.poweroff = pch_gbe_suspend,
 +	.restore = pch_gbe_resume,
 +};
 +#endif
 +
 +static struct pci_error_handlers pch_gbe_err_handler = {
 +	.error_detected = pch_gbe_io_error_detected,
 +	.slot_reset = pch_gbe_io_slot_reset,
 +	.resume = pch_gbe_io_resume
 +};
 +
 +static struct pci_driver pch_gbe_driver = {
 +	.name = KBUILD_MODNAME,
 +	.id_table = pch_gbe_pcidev_id,
 +	.probe = pch_gbe_probe,
 +	.remove = pch_gbe_remove,
 +#ifdef CONFIG_PM
 +	.driver.pm = &pch_gbe_pm_ops,
 +#endif
 +	.shutdown = pch_gbe_shutdown,
 +	.err_handler = &pch_gbe_err_handler
 +};
 +
 +
 +static int __init pch_gbe_init_module(void)
 +{
 +	int ret;
 +
 +	ret = pci_register_driver(&pch_gbe_driver);
 +	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
 +		if (copybreak == 0) {
 +			pr_info("copybreak disabled\n");
 +		} else {
 +			pr_info("copybreak enabled for packets <= %u bytes\n",
 +				copybreak);
 +		}
 +	}
 +	return ret;
 +}
 +
 +static void __exit pch_gbe_exit_module(void)
 +{
 +	pci_unregister_driver(&pch_gbe_driver);
 +}
 +
 +module_init(pch_gbe_init_module);
 +module_exit(pch_gbe_exit_module);
 +
 +MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
 +MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux at dsn.okisemi.com>");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_VERSION);
 +MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
 +
 +module_param(copybreak, uint, 0644);
 +MODULE_PARM_DESC(copybreak,
 +	"Maximum size of packet that is copied to a new buffer on receive");
 +
 +/* pch_gbe_main.c */
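
For reference, the restructured pch_gbe_napi_poll() above now follows the canonical NAPI completion pattern: always run Rx cleanup up to the budget, force another polling round while Tx cleanup is incomplete, and re-enable interrupts only once the budget is not exhausted. A minimal sketch of that shape (the example_* names are hypothetical, not part of the driver):

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_adapter *adapter =	/* hypothetical priv */
			container_of(napi, struct example_adapter, napi);
		int work_done = 0;

		example_clean_rx(adapter, &work_done, budget);
		if (!example_clean_tx(adapter))
			work_done = budget;	/* Tx pending: poll again */

		if (work_done < budget) {
			napi_complete(napi);		/* leave polling mode */
			example_irq_enable(adapter);	/* re-arm interrupts */
		}
		return work_done;
	}
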
diff --combined drivers/net/macvlan.c
index b100c90,376e3e9..24cf942
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -239,7 -239,7 +239,7 @@@ static int macvlan_queue_xmit(struct sk
  		dest = macvlan_hash_lookup(port, eth->h_dest);
  		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
  			/* send to lowerdev first for its network taps */
- 			vlan->forward(vlan->lowerdev, skb);
+ 			dev_forward_skb(vlan->lowerdev, skb);
  
  			return NET_XMIT_SUCCESS;
  		}
@@@ -543,8 -543,7 +543,8 @@@ static int macvlan_ethtool_get_settings
  					struct ethtool_cmd *cmd)
  {
  	const struct macvlan_dev *vlan = netdev_priv(dev);
 -	return dev_ethtool_get_settings(vlan->lowerdev, cmd);
 +
 +	return __ethtool_get_settings(vlan->lowerdev, cmd);
  }
  
  static const struct ethtool_ops macvlan_ethtool_ops = {
@@@ -562,7 -561,7 +562,7 @@@ static const struct net_device_ops macv
  	.ndo_change_mtu		= macvlan_change_mtu,
  	.ndo_change_rx_flags	= macvlan_change_rx_flags,
  	.ndo_set_mac_address	= macvlan_set_mac_address,
 -	.ndo_set_multicast_list	= macvlan_set_multicast_list,
 +	.ndo_set_rx_mode	= macvlan_set_multicast_list,
  	.ndo_get_stats64	= macvlan_dev_get_stats64,
  	.ndo_validate_addr	= eth_validate_addr,
  	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
diff --combined drivers/net/phy/dp83640.c
index be381c2,edd7304..c588a16
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@@ -35,15 -35,16 +35,15 @@@
  #define LAYER4		0x02
  #define LAYER2		0x01
  #define MAX_RXTS	64
 -#define N_EXT_TS	1
 +#define N_EXT_TS	6
  #define PSF_PTPVER	2
  #define PSF_EVNT	0x4000
  #define PSF_RX		0x2000
  #define PSF_TX		0x1000
  #define EXT_EVENT	1
 -#define EXT_GPIO	1
 -#define CAL_EVENT	2
 -#define CAL_GPIO	9
 -#define CAL_TRIGGER	2
 +#define CAL_EVENT	7
 +#define CAL_TRIGGER	7
 +#define PER_TRIGGER	6
  
  /* phyter seems to miss the mark by 16 ns */
  #define ADJTIME_FIX	16
@@@ -130,30 -131,16 +130,30 @@@ struct dp83640_clock 
  
  /* globals */
  
 +enum {
 +	CALIBRATE_GPIO,
 +	PEROUT_GPIO,
 +	EXTTS0_GPIO,
 +	EXTTS1_GPIO,
 +	EXTTS2_GPIO,
 +	EXTTS3_GPIO,
 +	EXTTS4_GPIO,
 +	EXTTS5_GPIO,
 +	GPIO_TABLE_SIZE
 +};
 +
  static int chosen_phy = -1;
 -static ushort cal_gpio = 4;
 +static ushort gpio_tab[GPIO_TABLE_SIZE] = {
 +	1, 2, 3, 4, 8, 9, 10, 11
 +};
  
  module_param(chosen_phy, int, 0444);
 -module_param(cal_gpio, ushort, 0444);
 +module_param_array(gpio_tab, ushort, NULL, 0444);
  
  MODULE_PARM_DESC(chosen_phy, \
  	"The address of the PHY to use for the ancillary clock features");
 -MODULE_PARM_DESC(cal_gpio, \
 -	"Which GPIO line to use for synchronizing multiple PHYs");
 +MODULE_PARM_DESC(gpio_tab, \
 +	"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
  
  /* a list of clocks and a mutex to protect it */
  static LIST_HEAD(phyter_clocks);
@@@ -248,61 -235,6 +248,61 @@@ static u64 phy2txts(struct phy_txts *p
  	return ns;
  }
  
 +static void periodic_output(struct dp83640_clock *clock,
 +			    struct ptp_clock_request *clkreq, bool on)
 +{
 +	struct dp83640_private *dp83640 = clock->chosen;
 +	struct phy_device *phydev = dp83640->phydev;
 +	u32 sec, nsec, period;
 +	u16 gpio, ptp_trig, trigger, val;
 +
 +	gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
 +	trigger = PER_TRIGGER;
 +
 +	ptp_trig = TRIG_WR |
 +		(trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
 +		(gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
 +		TRIG_PER |
 +		TRIG_PULSE;
 +
 +	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
 +
 +	if (!on) {
 +		val |= TRIG_DIS;
 +		mutex_lock(&clock->extreg_lock);
 +		ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
 +		ext_write(0, phydev, PAGE4, PTP_CTL, val);
 +		mutex_unlock(&clock->extreg_lock);
 +		return;
 +	}
 +
 +	sec = clkreq->perout.start.sec;
 +	nsec = clkreq->perout.start.nsec;
 +	period = clkreq->perout.period.sec * 1000000000UL;
 +	period += clkreq->perout.period.nsec;
 +
 +	mutex_lock(&clock->extreg_lock);
 +
 +	ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
 +
 +	/* load trigger */
 +	val |= TRIG_LOAD;
 +	ext_write(0, phydev, PAGE4, PTP_CTL, val);
 +	ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0] */
 +	ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
 +	ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
 +	ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
 +	ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
 +	ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16);    /* ns[31:16] */
 +
 +	/* enable trigger */
 +	val &= ~TRIG_LOAD;
 +	val |= TRIG_EN;
 +	ext_write(0, phydev, PAGE4, PTP_CTL, val);
 +
 +	mutex_unlock(&clock->extreg_lock);
 +}
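
The periodic_output() helper above backs the new PTP_CLK_REQ_PEROUT case added below. A hedged user-space sketch of driving it through the standard PTP character device (it assumes the dp83640 registered as /dev/ptp0; error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int request_pps(void)
	{
		struct ptp_perout_request req;
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0)
			return -1;
		memset(&req, 0, sizeof(req));
		req.index = 0;		/* the clock now advertises n_per_out = 1 */
		req.start.sec = 0;	/* start immediately */
		req.start.nsec = 0;
		req.period.sec = 1;	/* one pulse per second on the perout GPIO */
		req.period.nsec = 0;
		return ioctl(fd, PTP_PEROUT_REQUEST, &req);
	}
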
 +
  /* ptp clock methods */
  
  static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@@ -406,30 -338,19 +406,30 @@@ static int ptp_dp83640_enable(struct pt
  	struct dp83640_clock *clock =
  		container_of(ptp, struct dp83640_clock, caps);
  	struct phy_device *phydev = clock->chosen->phydev;
 -	u16 evnt;
 +	int index;
 +	u16 evnt, event_num, gpio_num;
  
  	switch (rq->type) {
  	case PTP_CLK_REQ_EXTTS:
 -		if (rq->extts.index != 0)
 +		index = rq->extts.index;
 +		if (index < 0 || index >= N_EXT_TS)
  			return -EINVAL;
 -		evnt = EVNT_WR | (EXT_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
 +		event_num = EXT_EVENT + index;
 +		evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
  		if (on) {
 -			evnt |= (EXT_GPIO & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
 +			gpio_num = gpio_tab[EXTTS0_GPIO + index];
 +			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
  			evnt |= EVNT_RISE;
  		}
  		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
  		return 0;
 +
 +	case PTP_CLK_REQ_PEROUT:
 +		if (rq->perout.index != 0)
 +			return -EINVAL;
 +		periodic_output(clock, rq, on);
 +		return 0;
 +
  	default:
  		break;
  	}
@@@ -520,10 -441,9 +520,10 @@@ static void recalibrate(struct dp83640_
  	struct list_head *this;
  	struct dp83640_private *tmp;
  	struct phy_device *master = clock->chosen->phydev;
 -	u16 cfg0, evnt, ptp_trig, trigger, val;
 +	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
  
  	trigger = CAL_TRIGGER;
 +	cal_gpio = gpio_tab[CALIBRATE_GPIO];
  
  	mutex_lock(&clock->extreg_lock);
  
@@@ -622,17 -542,11 +622,17 @@@
  
  /* time stamping methods */
  
 +static inline u16 exts_chan_to_edata(int ch)
 +{
 +	return 1 << ((ch + EXT_EVENT) * 2);
 +}
 +
  static int decode_evnt(struct dp83640_private *dp83640,
  		       void *data, u16 ests)
  {
  	struct phy_txts *phy_txts;
  	struct ptp_clock_event event;
 +	int i, parsed;
  	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
  	u16 ext_status = 0;
  
@@@ -654,25 -568,14 +654,25 @@@
  		dp83640->edata.ns_lo = phy_txts->ns_lo;
  	}
  
 +	if (ext_status) {
 +		parsed = words + 2;
 +	} else {
 +		parsed = words + 1;
 +		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
 +		ext_status = exts_chan_to_edata(i);
 +	}
 +
  	event.type = PTP_CLOCK_EXTTS;
 -	event.index = 0;
  	event.timestamp = phy2txts(&dp83640->edata);
  
 -	ptp_clock_event(dp83640->clock->ptp_clock, &event);
 +	for (i = 0; i < N_EXT_TS; i++) {
 +		if (ext_status & exts_chan_to_edata(i)) {
 +			event.index = i;
 +			ptp_clock_event(dp83640->clock->ptp_clock, &event);
 +		}
 +	}
  
 -	words = ext_status ? words + 2 : words + 1;
 -	return words * sizeof(u16);
 +	return parsed * sizeof(u16);
  }
  
  static void decode_rxts(struct dp83640_private *dp83640,
@@@ -686,7 -589,7 +686,7 @@@
  	prune_rx_ts(dp83640);
  
  	if (list_empty(&dp83640->rxpool)) {
- 		pr_warning("dp83640: rx timestamp pool is empty\n");
+ 		pr_debug("dp83640: rx timestamp pool is empty\n");
  		goto out;
  	}
  	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@@ -709,7 -612,7 +709,7 @@@ static void decode_txts(struct dp83640_
  	skb = skb_dequeue(&dp83640->tx_queue);
  
  	if (!skb) {
- 		pr_warning("dp83640: have timestamp but tx_queue empty\n");
+ 		pr_debug("dp83640: have timestamp but tx_queue empty\n");
  		return;
  	}
  	ns = phy2txts(phy_txts);
@@@ -761,41 -664,6 +761,41 @@@ static void decode_status_frame(struct 
  	}
  }
  
 +static int is_sync(struct sk_buff *skb, int type)
 +{
 +	u8 *data = skb->data, *msgtype;
 +	unsigned int offset = 0;
 +
 +	switch (type) {
 +	case PTP_CLASS_V1_IPV4:
 +	case PTP_CLASS_V2_IPV4:
 +		offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 +		break;
 +	case PTP_CLASS_V1_IPV6:
 +	case PTP_CLASS_V2_IPV6:
 +		offset = OFF_PTP6;
 +		break;
 +	case PTP_CLASS_V2_L2:
 +		offset = ETH_HLEN;
 +		break;
 +	case PTP_CLASS_V2_VLAN:
 +		offset = ETH_HLEN + VLAN_HLEN;
 +		break;
 +	default:
 +		return 0;
 +	}
 +
 +	if (type & PTP_CLASS_V1)
 +		offset += OFF_PTP_CONTROL;
 +
 +	if (skb->len < offset + 1)
 +		return 0;
 +
 +	msgtype = data + offset;
 +
 +	return (*msgtype & 0xf) == 0;
 +}
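
A note on the shared test at the end of is_sync(): PTPv2 carries the message type in the low nibble of the first header byte, while PTPv1 encodes it in the control field (hence the extra OFF_PTP_CONTROL skip); in both versions a value of 0 means Sync, so a single nibble comparison covers every recognized packet class.
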
 +
  static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
  {
  	u16 *seqid;
@@@ -872,7 -740,7 +872,7 @@@ static void dp83640_clock_init(struct d
  	clock->caps.max_adj	= 1953124;
  	clock->caps.n_alarm	= 0;
  	clock->caps.n_ext_ts	= N_EXT_TS;
 -	clock->caps.n_per_out	= 0;
 +	clock->caps.n_per_out	= 1;
  	clock->caps.pps		= 0;
  	clock->caps.adjfreq	= ptp_dp83640_adjfreq;
  	clock->caps.adjtime	= ptp_dp83640_adjtime;
@@@ -1045,10 -913,16 +1045,10 @@@ static int dp83640_hwtstamp(struct phy_
  	if (cfg.flags) /* reserved for future extensions */
  		return -EINVAL;
  
 -	switch (cfg.tx_type) {
 -	case HWTSTAMP_TX_OFF:
 -		dp83640->hwts_tx_en = 0;
 -		break;
 -	case HWTSTAMP_TX_ON:
 -		dp83640->hwts_tx_en = 1;
 -		break;
 -	default:
 +	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
  		return -ERANGE;
 -	}
 +
 +	dp83640->hwts_tx_en = cfg.tx_type;
  
  	switch (cfg.rx_filter) {
  	case HWTSTAMP_FILTER_NONE:
@@@ -1103,9 -977,6 +1103,9 @@@
  	if (dp83640->hwts_tx_en)
  		txcfg0 |= TX_TS_EN;
  
 +	if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
 +		txcfg0 |= SYNC_1STEP | CHK_1STEP;
 +
  	if (dp83640->hwts_rx_en)
  		rxcfg0 |= RX_TS_EN;
  
@@@ -1188,24 -1059,12 +1188,24 @@@ static void dp83640_txtstamp(struct phy
  {
  	struct dp83640_private *dp83640 = phydev->priv;
  
 -	if (!dp83640->hwts_tx_en) {
 +	switch (dp83640->hwts_tx_en) {
 +
 +	case HWTSTAMP_TX_ONESTEP_SYNC:
 +		if (is_sync(skb, type)) {
 +			kfree_skb(skb);
 +			return;
 +		}
 +		/* fall through */
 +	case HWTSTAMP_TX_ON:
 +		skb_queue_tail(&dp83640->tx_queue, skb);
 +		schedule_work(&dp83640->ts_work);
 +		break;
 +
 +	case HWTSTAMP_TX_OFF:
 +	default:
  		kfree_skb(skb);
 -		return;
 +		break;
  	}
 -	skb_queue_tail(&dp83640->tx_queue, skb);
 -	schedule_work(&dp83640->ts_work);
  }
  
  static struct phy_driver dp83640_driver = {
diff --combined include/linux/pci.h
index 581d2e2,9fc0122..f1b1ca1
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@@ -174,8 -174,6 +174,8 @@@ enum pci_dev_flags 
  	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
  	/* Device configuration is irrevocably lost if disabled into D3 */
  	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
 +	/* Provide indication device is assigned by a Virtual Machine Manager */
 +	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
  };
  
  enum pci_irq_reroute_variant {
@@@ -623,8 -621,9 +623,9 @@@ struct pci_driver 
  extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
  
  enum pcie_bus_config_types {
- 	PCIE_BUS_PERFORMANCE,
+ 	PCIE_BUS_TUNE_OFF,
  	PCIE_BUS_SAFE,
+ 	PCIE_BUS_PERFORMANCE,
  	PCIE_BUS_PEER2PEER,
  };
  
diff --combined net/batman-adv/soft-interface.c
index aceeabc,05dd351..f9cc957
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -445,31 -445,30 +445,31 @@@ static void softif_batman_recv(struct s
  {
  	struct bat_priv *bat_priv = netdev_priv(dev);
  	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 -	struct batman_packet *batman_packet;
 +	struct batman_ogm_packet *batman_ogm_packet;
  	struct softif_neigh *softif_neigh = NULL;
  	struct hard_iface *primary_if = NULL;
  	struct softif_neigh *curr_softif_neigh = NULL;
  
  	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
 -		batman_packet = (struct batman_packet *)
 +		batman_ogm_packet = (struct batman_ogm_packet *)
  					(skb->data + ETH_HLEN + VLAN_HLEN);
  	else
 -		batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
 +		batman_ogm_packet = (struct batman_ogm_packet *)
 +							(skb->data + ETH_HLEN);
  
 -	if (batman_packet->version != COMPAT_VERSION)
 +	if (batman_ogm_packet->version != COMPAT_VERSION)
  		goto out;
  
 -	if (batman_packet->packet_type != BAT_PACKET)
 +	if (batman_ogm_packet->packet_type != BAT_OGM)
  		goto out;
  
 -	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
 +	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
  		goto out;
  
 -	if (is_my_mac(batman_packet->orig))
 +	if (is_my_mac(batman_ogm_packet->orig))
  		goto out;
  
 -	softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
 +	softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
  	if (!softif_neigh)
  		goto out;
  
@@@ -533,11 -532,11 +533,11 @@@ static int interface_set_mac_addr(struc
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EADDRNOTAVAIL;
  
 -	/* only modify transtable if it has been initialised before */
 +	/* only modify transtable if it has been initialized before */
  	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
  		tt_local_remove(bat_priv, dev->dev_addr,
  				"mac address changed", false);
 -		tt_local_add(dev, addr->sa_data);
 +		tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
  	}
  
  	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@@ -566,7 -565,7 +566,7 @@@ static int interface_tx(struct sk_buff 
  	struct orig_node *orig_node = NULL;
  	int data_len = skb->len, ret;
  	short vid = -1;
- 	bool do_bcast = false;
+ 	bool do_bcast;
  
  	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
  		goto dropped;
@@@ -596,19 -595,18 +596,19 @@@
  		goto dropped;
  
  	/* Register the client MAC in the transtable */
 -	tt_local_add(soft_iface, ethhdr->h_source);
 +	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
  
 -	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 +	orig_node = transtable_search(bat_priv, ethhdr->h_source,
 +				      ethhdr->h_dest);
- 	if (is_multicast_ether_addr(ethhdr->h_dest) ||
- 				(orig_node && orig_node->gw_flags)) {
+ 	do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
 -	if (do_bcast ||	(orig_node && orig_node->gw_flags)) {
++	if (do_bcast || (orig_node && orig_node->gw_flags)) {
  		ret = gw_is_target(bat_priv, skb, orig_node);
  
  		if (ret < 0)
  			goto dropped;
  
- 		if (ret == 0)
- 			do_bcast = true;
+ 		if (ret)
+ 			do_bcast = false;
  	}
  
  	/* ethernet packet should be broadcasted */
@@@ -741,9 -739,6 +741,9 @@@ void interface_rx(struct net_device *so
  
  	soft_iface->last_rx = jiffies;
  
 +	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 +		goto dropped;
 +
  	netif_rx(skb);
  	goto out;
  
@@@ -801,8 -796,10 +801,8 @@@ struct net_device *softif_create(const 
  
  	soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
  
 -	if (!soft_iface) {
 -		pr_err("Unable to allocate the batman interface: %s\n", name);
 +	if (!soft_iface)
  		goto out;
 -	}
  
  	ret = register_netdevice(soft_iface);
  	if (ret < 0) {
@@@ -815,7 -812,6 +815,7 @@@
  
  	atomic_set(&bat_priv->aggregated_ogms, 1);
  	atomic_set(&bat_priv->bonding, 0);
 +	atomic_set(&bat_priv->ap_isolation, 0);
  	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
  	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
  	atomic_set(&bat_priv->gw_sel_class, 20);
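
The `++` lines in interface_tx() above carry the merged broadcast decision; written out flat (condensed from the diff, not new logic; the unicast-to-gateway reading of a positive gw_is_target() return is inferred from the surrounding code):

	do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
	if (do_bcast || (orig_node && orig_node->gw_flags)) {
		ret = gw_is_target(bat_priv, skb, orig_node);
		if (ret < 0)
			goto dropped;
		if (ret)
			do_bcast = false;	/* gateway handles it as unicast */
	}
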
diff --combined net/bridge/br_device.c
index 28325d1,ff3ed60..feb77ea
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@@ -91,7 -91,6 +91,6 @@@ static int br_dev_open(struct net_devic
  {
  	struct net_bridge *br = netdev_priv(dev);
  
- 	netif_carrier_off(dev);
  	netdev_update_features(dev);
  	netif_start_queue(dev);
  	br_stp_enable_bridge(br);
@@@ -108,8 -107,6 +107,6 @@@ static int br_dev_stop(struct net_devic
  {
  	struct net_bridge *br = netdev_priv(dev);
  
- 	netif_carrier_off(dev);
- 
  	br_stp_disable_bridge(br);
  	br_multicast_stop(br);
  
@@@ -304,7 -301,7 +301,7 @@@ static const struct net_device_ops br_n
  	.ndo_start_xmit		 = br_dev_xmit,
  	.ndo_get_stats64	 = br_get_stats64,
  	.ndo_set_mac_address	 = br_set_mac_address,
 -	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
 +	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
  	.ndo_change_mtu		 = br_change_mtu,
  	.ndo_do_ioctl		 = br_dev_ioctl,
  #ifdef CONFIG_NET_POLL_CONTROLLER
@@@ -361,8 -358,6 +358,8 @@@ void br_dev_setup(struct net_device *de
  	memcpy(br->group_addr, br_group_address, ETH_ALEN);
  
  	br->stp_enabled = BR_NO_STP;
 +	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
 +
  	br->designated_root = br->bridge_id;
  	br->bridge_max_age = br->max_age = 20 * HZ;
  	br->bridge_hello_time = br->hello_time = 2 * HZ;
diff --combined net/ipv4/tcp_input.c
index 143221e,d73aab3..81cae64
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@@ -217,25 -217,16 +217,25 @@@ static inline void TCP_ECN_withdraw_cwr
  	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
  }
  
 -static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 +static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
  {
 -	if (tp->ecn_flags & TCP_ECN_OK) {
 -		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
 -			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 +	if (!(tp->ecn_flags & TCP_ECN_OK))
 +		return;
 +
 +	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 +	case INET_ECN_NOT_ECT:
  		/* Funny extension: if ECT is not set on a segment,
 -		 * it is surely retransmit. It is not in ECN RFC,
 -		 * but Linux follows this rule. */
 -		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
 +		 * and we have already seen ECT on a previous segment,
 +		 * it is probably a retransmit.
 +		 */
 +		if (tp->ecn_flags & TCP_ECN_SEEN)
  			tcp_enter_quickack_mode((struct sock *)tp);
 +		break;
 +	case INET_ECN_CE:
 +		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 +		/* fall through */
 +	default:
 +		tp->ecn_flags |= TCP_ECN_SEEN;
  	}
  }
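
The rewritten TCP_ECN_check_ce() above dispatches on the two ECN bits of the IP DS field. For reference, the codepoints it distinguishes, as defined in include/net/inet_ecn.h:

	enum {
		INET_ECN_NOT_ECT = 0,	/* not ECN-capable; likely a retransmit */
		INET_ECN_ECT_1   = 1,	/* ECN-capable transport, codepoint (1) */
		INET_ECN_ECT_0   = 2,	/* ECN-capable transport, codepoint (0) */
		INET_ECN_CE      = 3,	/* congestion experienced; demand CWR   */
		INET_ECN_MASK    = 3,	/* the low two bits of the DS field     */
	};
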
  
@@@ -1398,9 -1389,7 +1398,7 @@@ static int tcp_shifted_skb(struct sock 
  
  	BUG_ON(!pcount);
  
- 	/* Tweak before seqno plays */
- 	if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
- 	    !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+ 	if (skb == tp->lost_skb_hint)
  		tp->lost_cnt_hint += pcount;
  
  	TCP_SKB_CB(prev)->end_seq += shifted;
@@@ -1449,7 -1438,7 +1447,7 @@@
  		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
  	}
  
 -	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
 +	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
  	if (skb == tcp_highest_sack(sk))
  		tcp_advance_highest_sack(sk, skb);
  
@@@ -2839,13 -2828,9 +2837,13 @@@ static int tcp_try_undo_loss(struct soc
  static inline void tcp_complete_cwr(struct sock *sk)
  {
  	struct tcp_sock *tp = tcp_sk(sk);
 -	/* Do not moderate cwnd if it's already undone in cwr or recovery */
 -	if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
 -		tp->snd_cwnd = tp->snd_ssthresh;
 +
 +	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 +	if (tp->undo_marker) {
 +		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
 +			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 +		else /* PRR */
 +			tp->snd_cwnd = tp->snd_ssthresh;
  		tp->snd_cwnd_stamp = tcp_time_stamp;
  	}
  	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@@ -2963,38 -2948,6 +2961,38 @@@ void tcp_simple_retransmit(struct sock 
  }
  EXPORT_SYMBOL(tcp_simple_retransmit);
  
 +/* This function implements the PRR algorithm, specifically the PRR-SSRB
 + * (proportional rate reduction with slow start reduction bound) as described in
 + * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
 + * It computes the number of packets to send (sndcnt) based on packets newly
 + * delivered:
 + *   1) If the packets in flight is larger than ssthresh, PRR spreads the
 + *	cwnd reductions across a full RTT.
 + *   2) If packets in flight is lower than ssthresh (such as due to excess
 + *	losses and/or application stalls), do not perform any further cwnd
 + *	reductions, but instead slow start up to ssthresh.
 + */
 +static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
 +					int fast_rexmit, int flag)
 +{
 +	struct tcp_sock *tp = tcp_sk(sk);
 +	int sndcnt = 0;
 +	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
 +
 +	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
 +		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 +			       tp->prior_cwnd - 1;
 +		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
 +	} else {
 +		sndcnt = min_t(int, delta,
 +			       max_t(int, tp->prr_delivered - tp->prr_out,
 +				     newly_acked_sacked) + 1);
 +	}
 +
 +	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
 +	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 +}
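
A worked example of the proportional branch above (illustrative numbers, not from the patch): with snd_ssthresh = 10, prior_cwnd = 20, prr_delivered = 5, prr_out = 2 and 15 packets in flight, the dividend is 10 * 5 + 20 - 1 = 69, so sndcnt = 69 / 20 - 2 = 1 and cwnd becomes 15 + 1 = 16. The '+ prior_cwnd - 1' term makes the div_u64() round up, so cwnd ratchets down smoothly toward ssthresh over one RTT instead of being cut in a single step.
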
 +
  /* Process an event, which can update packets-in-flight not trivially.
   * Main goal of this function is to calculate new estimate for left_out,
   * taking into account both packets sitting in receiver's buffer and
@@@ -3006,8 -2959,7 +3004,8 @@@
   * It does _not_ decide what to send, it is made in function
   * tcp_xmit_retransmit_queue().
   */
 -static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 +static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 +				  int newly_acked_sacked, int flag)
  {
  	struct inet_connection_sock *icsk = inet_csk(sk);
  	struct tcp_sock *tp = tcp_sk(sk);
@@@ -3157,17 -3109,13 +3155,17 @@@
  
  		tp->bytes_acked = 0;
  		tp->snd_cwnd_cnt = 0;
 +		tp->prior_cwnd = tp->snd_cwnd;
 +		tp->prr_delivered = 0;
 +		tp->prr_out = 0;
  		tcp_set_ca_state(sk, TCP_CA_Recovery);
  		fast_rexmit = 1;
  	}
  
  	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
  		tcp_update_scoreboard(sk, fast_rexmit);
 -	tcp_cwnd_down(sk, flag);
 +	tp->prr_delivered += newly_acked_sacked;
 +	tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
  	tcp_xmit_retransmit_queue(sk);
  }
  
@@@ -3348,7 -3296,7 +3346,7 @@@ static int tcp_clean_rtx_queue(struct s
  		 * connection startup slow start one packet too
  		 * quickly.  This is severely frowned upon behavior.
  		 */
 -		if (!(scb->flags & TCPHDR_SYN)) {
 +		if (!(scb->tcp_flags & TCPHDR_SYN)) {
  			flag |= FLAG_DATA_ACKED;
  		} else {
  			flag |= FLAG_SYN_ACKED;
@@@ -3682,8 -3630,6 +3680,8 @@@ static int tcp_ack(struct sock *sk, str
  	u32 prior_in_flight;
  	u32 prior_fackets;
  	int prior_packets;
 +	int prior_sacked = tp->sacked_out;
 +	int newly_acked_sacked = 0;
  	int frto_cwnd = 0;
  
  	/* If the ack is older than previous acks
@@@ -3755,9 -3701,6 +3753,9 @@@
  	/* See if we can take anything off of the retransmit queue. */
  	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
  
 +	newly_acked_sacked = (prior_packets - prior_sacked) -
 +			     (tp->packets_out - tp->sacked_out);
 +
  	if (tp->frto_counter)
  		frto_cwnd = tcp_process_frto(sk, flag);
  	/* Guarantee sacktag reordering detection against wrap-arounds */
@@@ -3770,7 -3713,7 +3768,7 @@@
  		    tcp_may_raise_cwnd(sk, flag))
  			tcp_cong_avoid(sk, ack, prior_in_flight);
  		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
 -				      flag);
 +				      newly_acked_sacked, flag);
  	} else {
  		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
  			tcp_cong_avoid(sk, ack, prior_in_flight);
diff --combined net/ipv4/tcp_ipv4.c
index dd3fad9,7963e03..48da7cc
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -927,18 -927,21 +927,21 @@@ int tcp_v4_md5_do_add(struct sock *sk, 
  			}
  			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
  		}
- 		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+ 
+ 		md5sig = tp->md5sig_info;
+ 		if (md5sig->entries4 == 0 &&
+ 		    tcp_alloc_md5sig_pool(sk) == NULL) {
  			kfree(newkey);
  			return -ENOMEM;
  		}
- 		md5sig = tp->md5sig_info;
  
  		if (md5sig->alloced4 == md5sig->entries4) {
  			keys = kmalloc((sizeof(*keys) *
  					(md5sig->entries4 + 1)), GFP_ATOMIC);
  			if (!keys) {
  				kfree(newkey);
- 				tcp_free_md5sig_pool();
+ 				if (md5sig->entries4 == 0)
+ 					tcp_free_md5sig_pool();
  				return -ENOMEM;
  			}
  
@@@ -982,6 -985,7 +985,7 @@@ int tcp_v4_md5_do_del(struct sock *sk, 
  				kfree(tp->md5sig_info->keys4);
  				tp->md5sig_info->keys4 = NULL;
  				tp->md5sig_info->alloced4 = 0;
+ 				tcp_free_md5sig_pool();
  			} else if (tp->md5sig_info->entries4 != i) {
  				/* Need to do some manipulation */
  				memmove(&tp->md5sig_info->keys4[i],
@@@ -989,7 -993,6 +993,6 @@@
  					(tp->md5sig_info->entries4 - i) *
  					 sizeof(struct tcp4_md5sig_key));
  			}
- 			tcp_free_md5sig_pool();
  			return 0;
  		}
  	}
@@@ -1585,7 -1588,7 +1588,7 @@@ int tcp_v4_do_rcv(struct sock *sk, stru
  #endif
  
  	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 -		sock_rps_save_rxhash(sk, skb->rxhash);
 +		sock_rps_save_rxhash(sk, skb);
  		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
  			rsk = sk;
  			goto reset;
@@@ -1602,7 -1605,7 +1605,7 @@@
  			goto discard;
  
  		if (nsk != sk) {
 -			sock_rps_save_rxhash(nsk, skb->rxhash);
 +			sock_rps_save_rxhash(nsk, skb);
  			if (tcp_child_process(sk, nsk, skb)) {
  				rsk = nsk;
  				goto reset;
@@@ -1610,7 -1613,7 +1613,7 @@@
  			return 0;
  		}
  	} else
 -		sock_rps_save_rxhash(sk, skb->rxhash);
 +		sock_rps_save_rxhash(sk, skb);
  
  	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
  		rsk = sk;
@@@ -1677,7 -1680,7 +1680,7 @@@ int tcp_v4_rcv(struct sk_buff *skb
  				    skb->len - th->doff * 4);
  	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
  	TCP_SKB_CB(skb)->when	 = 0;
 -	TCP_SKB_CB(skb)->flags	 = iph->tos;
 +	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
  	TCP_SKB_CB(skb)->sacked	 = 0;
  
  	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
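
The md5sig hunks above (mirrored for IPv6 in the next file) rebalance the MD5 pool reference counting: a pool reference is now taken only when the first key of the address family is installed and dropped only when the last one is removed, rather than on every add/delete. Condensed from the hunks above (a sketch, not the actual helpers):

	/* add path: first key takes the pool reference */
	if (md5sig->entries4 == 0 && tcp_alloc_md5sig_pool(sk) == NULL)
		return -ENOMEM;

	/* delete path: last key drops the pool reference */
	if (--md5sig->entries4 == 0)
		tcp_free_md5sig_pool();
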
diff --combined net/ipv6/tcp_ipv6.c
index 00797d8,7b8fc57..5357902
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@@ -591,7 -591,8 +591,8 @@@ static int tcp_v6_md5_do_add(struct soc
  			}
  			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
  		}
- 		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+ 		if (tp->md5sig_info->entries6 == 0 &&
+ 			tcp_alloc_md5sig_pool(sk) == NULL) {
  			kfree(newkey);
  			return -ENOMEM;
  		}
@@@ -600,8 -601,9 +601,9 @@@
  				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
  
  			if (!keys) {
- 				tcp_free_md5sig_pool();
  				kfree(newkey);
+ 				if (tp->md5sig_info->entries6 == 0)
+ 					tcp_free_md5sig_pool();
  				return -ENOMEM;
  			}
  
@@@ -647,6 -649,7 +649,7 @@@ static int tcp_v6_md5_do_del(struct soc
  				kfree(tp->md5sig_info->keys6);
  				tp->md5sig_info->keys6 = NULL;
  				tp->md5sig_info->alloced6 = 0;
+ 				tcp_free_md5sig_pool();
  			} else {
  				/* shrink the database */
  				if (tp->md5sig_info->entries6 != i)
@@@ -655,7 -658,6 +658,6 @@@
  						(tp->md5sig_info->entries6 - i)
  						* sizeof (tp->md5sig_info->keys6[0]));
  			}
- 			tcp_free_md5sig_pool();
  			return 0;
  		}
  	}
@@@ -1383,6 -1385,8 +1385,8 @@@ static struct sock * tcp_v6_syn_recv_so
  		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
  #endif
  
+ 		newnp->ipv6_ac_list = NULL;
+ 		newnp->ipv6_fl_list = NULL;
  		newnp->pktoptions  = NULL;
  		newnp->opt	   = NULL;
  		newnp->mcast_oif   = inet6_iif(skb);
@@@ -1447,6 -1451,7 +1451,7 @@@
  	   First: no IPv4 options.
  	 */
  	newinet->inet_opt = NULL;
+ 	newnp->ipv6_ac_list = NULL;
  	newnp->ipv6_fl_list = NULL;
  
  	/* Clone RX bits */
@@@ -1603,7 -1608,7 +1608,7 @@@ static int tcp_v6_do_rcv(struct sock *s
  		opt_skb = skb_clone(skb, GFP_ATOMIC);
  
  	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 -		sock_rps_save_rxhash(sk, skb->rxhash);
 +		sock_rps_save_rxhash(sk, skb);
  		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
  			goto reset;
  		if (opt_skb)
@@@ -1625,7 -1630,7 +1630,7 @@@
  		 * the new socket..
  		 */
  		if(nsk != sk) {
 -			sock_rps_save_rxhash(nsk, skb->rxhash);
 +			sock_rps_save_rxhash(nsk, skb);
  			if (tcp_child_process(sk, nsk, skb))
  				goto reset;
  			if (opt_skb)
@@@ -1633,7 -1638,7 +1638,7 @@@
  			return 0;
  		}
  	} else
 -		sock_rps_save_rxhash(sk, skb->rxhash);
 +		sock_rps_save_rxhash(sk, skb);
  
  	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
  		goto reset;
@@@ -1717,7 -1722,7 +1722,7 @@@ static int tcp_v6_rcv(struct sk_buff *s
  				    skb->len - th->doff*4);
  	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
  	TCP_SKB_CB(skb)->when = 0;
 -	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
 +	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
  	TCP_SKB_CB(skb)->sacked = 0;
  
  	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
diff --combined net/packet/af_packet.c
index 25e68f5,fabb4fa..dac91ab
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@@ -40,10 -40,6 +40,10 @@@
   *					byte arrays at the end of sockaddr_ll
   *					and packet_mreq.
   *		Johann Baudy	:	Added TX RING.
 + *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 + *					layer.
 + *					Copyright (C) 2011, <lokec at ccs.neu.edu>
 + *
   *
   *		This program is free software; you can redistribute it and/or
   *		modify it under the terms of the GNU General Public License
@@@ -165,56 -161,9 +165,56 @@@ struct packet_mreq_max 
  	unsigned char	mr_address[MAX_ADDR_LEN];
  };
  
 -static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 +static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
  		int closing, int tx_ring);
  
 +
 +#define V3_ALIGNMENT	(8)
 +
 +#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 +
 +#define BLK_PLUS_PRIV(sz_of_priv) \
 +	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 +
 +/* kbdq - kernel block descriptor queue */
 +struct tpacket_kbdq_core {
 +	struct pgv	*pkbdq;
 +	unsigned int	feature_req_word;
 +	unsigned int	hdrlen;
 +	unsigned char	reset_pending_on_curr_blk;
 +	unsigned char   delete_blk_timer;
 +	unsigned short	kactive_blk_num;
 +	unsigned short	blk_sizeof_priv;
 +
 +	/* last_kactive_blk_num:
 +	 * trick to see if user-space has caught up
 +	 * in order to avoid refreshing the timer when every single pkt arrives.
 +	 */
 +	unsigned short	last_kactive_blk_num;
 +
 +	char		*pkblk_start;
 +	char		*pkblk_end;
 +	int		kblk_size;
 +	unsigned int	knum_blocks;
 +	uint64_t	knxt_seq_num;
 +	char		*prev;
 +	char		*nxt_offset;
 +	struct sk_buff	*skb;
 +
 +	atomic_t	blk_fill_in_prog;
 +
 +	/* Default is set to 8ms */
 +#define DEFAULT_PRB_RETIRE_TOV	(8)
 +
 +	unsigned short  retire_blk_tov;
 +	unsigned short  version;
 +	unsigned long	tov_in_jiffies;
 +
 +	/* timer to retire an outstanding block */
 +	struct timer_list retire_blk_timer;
 +};
 +
 +#define PGV_FROM_VMALLOC 1
  struct pgv {
  	char *buffer;
  };
@@@ -230,44 -179,12 +230,44 @@@ struct packet_ring_buffer 
  	unsigned int		pg_vec_pages;
  	unsigned int		pg_vec_len;
  
 +	struct tpacket_kbdq_core	prb_bdqc;
  	atomic_t		pending;
  };
  
 +#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 +#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 +#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 +#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 +#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 +#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 +#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 +
  struct packet_sock;
  static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
  
 +static void *packet_previous_frame(struct packet_sock *po,
 +		struct packet_ring_buffer *rb,
 +		int status);
 +static void packet_increment_head(struct packet_ring_buffer *buff);
 +static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
 +			struct tpacket_block_desc *);
 +static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 +			struct packet_sock *);
 +static void prb_retire_current_block(struct tpacket_kbdq_core *,
 +		struct packet_sock *, unsigned int status);
 +static int prb_queue_frozen(struct tpacket_kbdq_core *);
 +static void prb_open_block(struct tpacket_kbdq_core *,
 +		struct tpacket_block_desc *);
 +static void prb_retire_rx_blk_timer_expired(unsigned long);
 +static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 +static void prb_init_blk_timer(struct packet_sock *,
 +		struct tpacket_kbdq_core *,
 +		void (*func) (unsigned long));
 +static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 +static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 +		struct tpacket3_hdr *);
 +static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 +		struct tpacket3_hdr *);
  static void packet_flush_mclist(struct sock *sk);
  
  struct packet_fanout;
@@@ -276,7 -193,6 +276,7 @@@ struct packet_sock 
  	struct sock		sk;
  	struct packet_fanout	*fanout;
  	struct tpacket_stats	stats;
 +	union  tpacket_stats_u	stats_u;
  	struct packet_ring_buffer	rx_ring;
  	struct packet_ring_buffer	tx_ring;
  	int			copy_thresh;
@@@ -326,15 -242,6 +326,15 @@@ struct packet_skb_cb 
  
  #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
  
 +#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 +#define GET_PBLOCK_DESC(x, bid)	\
 +	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 +#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 +	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 +#define GET_NEXT_PRB_BLK_NUM(x) \
 +	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 +	((x)->kactive_blk_num+1) : 0)
 +
  static inline struct packet_sock *pkt_sk(struct sock *sk)
  {
  	return (struct packet_sock *)sk;
@@@ -418,9 -325,8 +418,9 @@@ static void __packet_set_status(struct 
  		h.h2->tp_status = status;
  		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
  		break;
 +	case TPACKET_V3:
  	default:
 -		pr_err("TPACKET version not supported\n");
 +		WARN(1, "TPACKET version not supported.\n");
  		BUG();
  	}
  
@@@ -445,9 -351,8 +445,9 @@@ static int __packet_get_status(struct p
  	case TPACKET_V2:
  		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
  		return h.h2->tp_status;
 +	case TPACKET_V3:
  	default:
 -		pr_err("TPACKET version not supported\n");
 +		WARN(1, "TPACKET version not supported.\n");
  		BUG();
  		return 0;
  	}
@@@ -484,670 -389,6 +484,670 @@@ static inline void *packet_current_fram
  	return packet_lookup_frame(po, rb, rb->head, status);
  }
  
 +static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 +{
 +	del_timer_sync(&pkc->retire_blk_timer);
 +}
 +
 +static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 +		int tx_ring,
 +		struct sk_buff_head *rb_queue)
 +{
 +	struct tpacket_kbdq_core *pkc;
 +
 +	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 +
 +	spin_lock(&rb_queue->lock);
 +	pkc->delete_blk_timer = 1;
 +	spin_unlock(&rb_queue->lock);
 +
 +	prb_del_retire_blk_timer(pkc);
 +}
 +
 +static void prb_init_blk_timer(struct packet_sock *po,
 +		struct tpacket_kbdq_core *pkc,
 +		void (*func) (unsigned long))
 +{
 +	init_timer(&pkc->retire_blk_timer);
 +	pkc->retire_blk_timer.data = (long)po;
 +	pkc->retire_blk_timer.function = func;
 +	pkc->retire_blk_timer.expires = jiffies;
 +}
 +
 +static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
 +{
 +	struct tpacket_kbdq_core *pkc;
 +
 +	if (tx_ring)
 +		BUG();
 +
 +	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 +	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
 +}
 +
 +static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 +				int blk_size_in_bytes)
 +{
 +	struct net_device *dev;
 +	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 +	struct ethtool_cmd ecmd;
 +	int err;
 +
 +	rtnl_lock();
 +	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 +	if (unlikely(!dev)) {
 +		rtnl_unlock();
 +		return DEFAULT_PRB_RETIRE_TOV;
 +	}
 +	err = __ethtool_get_settings(dev, &ecmd);
 +	rtnl_unlock();
 +	if (!err) {
 +		switch (ecmd.speed) {
 +		case SPEED_10000:
 +			msec = 1;
 +			div = 10000/1000;
 +			break;
 +		case SPEED_1000:
 +			msec = 1;
 +			div = 1000/1000;
 +			break;
 +		/*
 +		 * If the link speed is so slow you don't really
 +		 * need to worry about perf anyways
 +		 * need to worry about perf anyway
 +		case SPEED_100:
 +		case SPEED_10:
 +		default:
 +			return DEFAULT_PRB_RETIRE_TOV;
 +		}
 +	}
 +
 +	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 +
 +	if (div)
 +		mbits /= div;
 +
 +	tmo = mbits * msec;
 +
 +	if (div)
 +		return tmo+1;
 +	return tmo;
 +}
 +
 +static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 +			union tpacket_req_u *req_u)
 +{
 +	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 +}
 +
 +static void init_prb_bdqc(struct packet_sock *po,
 +			struct packet_ring_buffer *rb,
 +			struct pgv *pg_vec,
 +			union tpacket_req_u *req_u, int tx_ring)
 +{
 +	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
 +	struct tpacket_block_desc *pbd;
 +
 +	memset(p1, 0x0, sizeof(*p1));
 +
 +	p1->knxt_seq_num = 1;
 +	p1->pkbdq = pg_vec;
 +	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 +	p1->pkblk_start	= (char *)pg_vec[0].buffer;
 +	p1->kblk_size = req_u->req3.tp_block_size;
 +	p1->knum_blocks	= req_u->req3.tp_block_nr;
 +	p1->hdrlen = po->tp_hdrlen;
 +	p1->version = po->tp_version;
 +	p1->last_kactive_blk_num = 0;
 +	po->stats_u.stats3.tp_freeze_q_cnt = 0;
 +	if (req_u->req3.tp_retire_blk_tov)
 +		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 +	else
 +		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 +						req_u->req3.tp_block_size);
 +	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 +	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 +
 +	prb_init_ft_ops(p1, req_u);
 +	prb_setup_retire_blk_timer(po, tx_ring);
 +	prb_open_block(p1, pbd);
 +}
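
init_prb_bdqc() above consumes the fields of the new struct tpacket_req3. A hedged user-space sketch of configuring a TPACKET_V3 receive ring with them (it assumes the UAPI added by this series; error handling omitted):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static int setup_v3_rx_ring(int fd)	/* fd: an AF_PACKET socket */
	{
		int ver = TPACKET_V3;
		struct tpacket_req3 req;

		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
			return -1;
		memset(&req, 0, sizeof(req));
		req.tp_block_size = 1 << 20;	/* 1 MB blocks, as in the comments */
		req.tp_block_nr = 8;
		req.tp_frame_size = 2048;
		req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) *
				  req.tp_block_nr;
		req.tp_retire_blk_tov = 10;	/* ms; 0 lets the kernel derive it */
		return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
				  &req, sizeof(req));
	}
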
 +
 +/*  Do NOT update the last_blk_num first.
 + *  Assumes sk_buff_head lock is held.
 + */
 +static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 +{
 +	mod_timer(&pkc->retire_blk_timer,
 +			jiffies + pkc->tov_in_jiffies);
 +	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 +}
 +
 +/*
 + * Timer logic:
 + * 1) We refresh the timer only when we open a block.
 + *    By doing this we don't waste cycles refreshing the timer
 + *	  on a packet-by-packet basis.
 + *
 + * With a 1MB block-size, on a 1Gbps line, it will take
 + * i) ~8 ms to fill a block + ii) memcpy etc.
 + * In this cut we are not accounting for the memcpy time.
 + *
 + * So, if the user sets the 'tmo' to 10ms then the timer
 + * will never fire while the block is still getting filled
 + * (which is what we want). However, the user could choose
 + * to close a block early and that's fine.
 + *
 + * But when the timer does fire, we check whether or not to refresh it.
 + * Since the tmo granularity is in msecs, it is not too expensive
 + * to refresh the timer, let's say every '8' msecs.
 + * Either the user can set the 'tmo' or we can derive it based on
 + * a) line-speed and b) block-size.
 + * prb_calc_retire_blk_tmo() calculates the tmo.
 + *
 + */
 +static void prb_retire_rx_blk_timer_expired(unsigned long data)
 +{
 +	struct packet_sock *po = (struct packet_sock *)data;
 +	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
 +	unsigned int frozen;
 +	struct tpacket_block_desc *pbd;
 +
 +	spin_lock(&po->sk.sk_receive_queue.lock);
 +
 +	frozen = prb_queue_frozen(pkc);
 +	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 +
 +	if (unlikely(pkc->delete_blk_timer))
 +		goto out;
 +
 +	/* We only need to plug the race when the block is partially filled.
 +	 * tpacket_rcv:
 +	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 +	 *		copy_bits() is in progress ...
 +	 *		timer fires on other cpu:
 +	 *		we can't retire the current block because copy_bits
 +	 *		is in progress.
 +	 *
 +	 */
 +	if (BLOCK_NUM_PKTS(pbd)) {
 +		while (atomic_read(&pkc->blk_fill_in_prog)) {
 +			/* Waiting for skb_copy_bits to finish... */
 +			cpu_relax();
 +		}
 +	}
 +
 +	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 +		if (!frozen) {
 +			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 +			if (!prb_dispatch_next_block(pkc, po))
 +				goto refresh_timer;
 +			else
 +				goto out;
 +		} else {
 +			/* Case 1. Queue was frozen because user-space was
 +			 *	   lagging behind.
 +			 */
 +			if (prb_curr_blk_in_use(pkc, pbd)) {
 +				/*
 +				 * Ok, user-space is still behind.
 +				 * So just refresh the timer.
 +				 */
 +				goto refresh_timer;
 +			} else {
 +			       /* Case 2. The queue was frozen, user-space
 +				* caught up, now the link went idle and the
 +				* timer fired. We don't have a block to close,
 +				* so we open this block and restart the timer.
 +				* Opening a block thaws the queue and restarts
 +				* the timer; thawing/timer-refresh is a side
 +				* effect.
 +				*/
 +				prb_open_block(pkc, pbd);
 +				goto out;
 +			}
 +		}
 +	}
 +
 +refresh_timer:
 +	_prb_refresh_rx_retire_blk_timer(pkc);
 +
 +out:
 +	spin_unlock(&po->sk.sk_receive_queue.lock);
 +}
 +
 +static inline void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 +		struct tpacket_block_desc *pbd1, __u32 status)
 +{
 +	/* Flush everything minus the block header */
 +
 +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 +	u8 *start, *end;
 +
 +	start = (u8 *)pbd1;
 +
 +	/* Skip the block header (we know the header WILL fit in 4K) */
 +	start += PAGE_SIZE;
 +
 +	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 +	for (; start < end; start += PAGE_SIZE)
 +		flush_dcache_page(pgv_to_page(start));
 +
 +	smp_wmb();
 +#endif
 +
 +	/* Now update the block status. */
 +
 +	BLOCK_STATUS(pbd1) = status;
 +
 +	/* Flush the block header */
 +
 +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 +	start = (u8 *)pbd1;
 +	flush_dcache_page(pgv_to_page(start));
 +
 +	smp_wmb();
 +#endif
 +}
 +
 +/*
 + * Side effect:
 + *
 + * 1) flush the block
 + * 2) Increment active_blk_num
 + *
 + * Note: We DON'T refresh the timer on purpose,
 + *	because almost always the next block will be opened.
 + */
 +static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 +		struct tpacket_block_desc *pbd1,
 +		struct packet_sock *po, unsigned int stat)
 +{
 +	__u32 status = TP_STATUS_USER | stat;
 +
 +	struct tpacket3_hdr *last_pkt;
 +	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 +
 +	if (po->stats.tp_drops)
 +		status |= TP_STATUS_LOSING;
 +
 +	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 +	last_pkt->tp_next_offset = 0;
 +
 +	/* Get the ts of the last pkt */
 +	if (BLOCK_NUM_PKTS(pbd1)) {
 +		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 +		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 +	} else {
 +		/* Ok, we tmo'd - so get the current time */
 +		struct timespec ts;
 +		getnstimeofday(&ts);
 +		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 +		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 +	}
 +
 +	smp_wmb();
 +
 +	/* Flush the block */
 +	prb_flush_block(pkc1, pbd1, status);
 +
 +	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 +}
 +
 +static inline void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 +{
 +	pkc->reset_pending_on_curr_blk = 0;
 +}
 +
 +/*
 + * Side effect of opening a block:
 + *
 + * 1) prb_queue is thawed.
 + * 2) retire_blk_timer is refreshed.
 + *
 + */
 +static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 +	struct tpacket_block_desc *pbd1)
 +{
 +	struct timespec ts;
 +	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 +
 +	smp_rmb();
 +
 +	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
 +
 +		/* We could have just memset this but we would lose the
 +		 * flexibility of making the priv area sticky
 +		 */
 +		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 +		BLOCK_NUM_PKTS(pbd1) = 0;
 +		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 +		getnstimeofday(&ts);
 +		h1->ts_first_pkt.ts_sec = ts.tv_sec;
 +		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 +		pkc1->pkblk_start = (char *)pbd1;
 +		pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
 +		BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
 +		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 +		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 +		pbd1->version = pkc1->version;
 +		pkc1->prev = pkc1->nxt_offset;
 +		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 +		prb_thaw_queue(pkc1);
 +		_prb_refresh_rx_retire_blk_timer(pkc1);
 +
 +		smp_wmb();
 +
 +		return;
 +	}
 +
 +	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
 +		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
 +	dump_stack();
 +	BUG();
 +}
 +
 +/*
 + * Queue freeze logic:
 + * 1) Assume tp_block_nr = 8 blocks.
 + * 2) At time 't0', user opens Rx ring.
 + * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 + * 4) user-space is either sleeping or processing block '0'.
 + * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 + *    it will close block-7, loop around and try to fill block '0'.
 + *    call-flow:
 + *    __packet_lookup_frame_in_block
 + *      prb_retire_current_block()
 + *      prb_dispatch_next_block()
 + *        |->(BLOCK_STATUS == USER) evaluates to true
 + *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 + * 6) Now there are two cases:
 + *    6.1) Link goes idle right after the queue is frozen.
 + *         But remember, the last open_block() refreshed the timer.
 + *         When this timer expires, it will refresh itself so that we can
 + *         re-open block-0 in the near future.
 + *    6.2) Link is busy and keeps on receiving packets. This is a simple
 + *         case and __packet_lookup_frame_in_block will check if block-0
 + *         is free and can now be re-used.
 + */
 +static inline void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 +				  struct packet_sock *po)
 +{
 +	pkc->reset_pending_on_curr_blk = 1;
 +	po->stats_u.stats3.tp_freeze_q_cnt++;
 +}
 +
 +#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 +
 +/*
 + * If the next block is free then we will dispatch it
 + * and return a good offset.
 + * Else, we will freeze the queue.
 + * So, caller must check the return value.
 + */
 +static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 +		struct packet_sock *po)
 +{
 +	struct tpacket_block_desc *pbd;
 +
 +	smp_rmb();
 +
 +	/* 1. Get current block num */
 +	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 +
 +	/* 2. If this block is currently in_use then freeze the queue */
 +	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 +		prb_freeze_queue(pkc, po);
 +		return NULL;
 +	}
 +
 +	/*
 +	 * 3.
 +	 * open this block and return the offset where the first packet
 +	 * needs to get stored.
 +	 */
 +	prb_open_block(pkc, pbd);
 +	return (void *)pkc->nxt_offset;
 +}
 +
 +static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 +		struct packet_sock *po, unsigned int status)
 +{
 +	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 +
 +	/* retire/close the current block */
 +	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 +		/*
 +		 * Plug the case where copy_bits() is in progress on
 +		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, which
 +		 * didn't have space to copy the pkt into the current
 +		 * block and called prb_retire_current_block().
 +		 *
 +		 * We don't need to worry about the TMO case because
 +		 * the timer-handler already handled this case.
 +		 */
 +		if (!(status & TP_STATUS_BLK_TMO)) {
 +			while (atomic_read(&pkc->blk_fill_in_prog)) {
 +				/* Waiting for skb_copy_bits to finish... */
 +				cpu_relax();
 +			}
 +		}
 +		prb_close_block(pkc, pbd, po, status);
 +		return;
 +	}
 +
 +	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
 +	dump_stack();
 +	BUG();
 +}
 +
 +static inline int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
 +				      struct tpacket_block_desc *pbd)
 +{
 +	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 +}
 +
 +static inline int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 +{
 +	return pkc->reset_pending_on_curr_blk;
 +}
 +
 +static inline void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 +{
 +	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 +	atomic_dec(&pkc->blk_fill_in_prog);
 +}
 +
 +static inline void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 +			struct tpacket3_hdr *ppd)
 +{
 +	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
 +}
 +
 +static inline void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 +			struct tpacket3_hdr *ppd)
 +{
 +	ppd->hv1.tp_rxhash = 0;
 +}
 +
 +static inline void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 +			struct tpacket3_hdr *ppd)
 +{
 +	if (vlan_tx_tag_present(pkc->skb)) {
 +		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
 +		ppd->tp_status = TP_STATUS_VLAN_VALID;
 +	} else {
 +		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
 +	}
 +}
 +
 +static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 +			struct tpacket3_hdr *ppd)
 +{
 +	prb_fill_vlan_info(pkc, ppd);
 +
 +	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 +		prb_fill_rxhash(pkc, ppd);
 +	else
 +		prb_clear_rxhash(pkc, ppd);
 +}
 +
 +static inline void prb_fill_curr_block(char *curr,
 +				struct tpacket_kbdq_core *pkc,
 +				struct tpacket_block_desc *pbd,
 +				unsigned int len)
 +{
 +	struct tpacket3_hdr *ppd;
 +
 +	ppd  = (struct tpacket3_hdr *)curr;
 +	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 +	pkc->prev = curr;
 +	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
 +	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
 +	BLOCK_NUM_PKTS(pbd) += 1;
 +	atomic_inc(&pkc->blk_fill_in_prog);
 +	prb_run_all_ft_ops(pkc, ppd);
 +}
 +
 +/* Assumes caller has the sk->rx_queue.lock */
 +static void *__packet_lookup_frame_in_block(struct packet_sock *po,
 +					    struct sk_buff *skb,
 +					    int status,
 +					    unsigned int len)
 +{
 +	struct tpacket_kbdq_core *pkc;
 +	struct tpacket_block_desc *pbd;
 +	char *curr, *end;
 +
 +	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
 +	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 +
 +	/* Queue is frozen when user space is lagging behind */
 +	if (prb_queue_frozen(pkc)) {
 +		/*
 +		 * Check if the last block which caused the queue to freeze
 +		 * is still in use by user-space.
 +		 */
 +		if (prb_curr_blk_in_use(pkc, pbd)) {
 +			/* Can't record this packet */
 +			return NULL;
 +		} else {
 +			/*
 +			 * Ok, the block was released by user-space.
 +			 * Now let's open that block.
 +			 * Opening a block also thaws the queue as
 +			 * a side effect.
 +			 */
 +			prb_open_block(pkc, pbd);
 +		}
 +	}
 +
 +	smp_mb();
 +	curr = pkc->nxt_offset;
 +	pkc->skb = skb;
 +	end = (char *)pbd + pkc->kblk_size;
 +
 +	/* first try the current block */
 +	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
 +		prb_fill_curr_block(curr, pkc, pbd, len);
 +		return (void *)curr;
 +	}
 +
 +	/* Ok, close the current block */
 +	prb_retire_current_block(pkc, po, 0);
 +
 +	/* Now, try to dispatch the next block */
 +	curr = (char *)prb_dispatch_next_block(pkc, po);
 +	if (curr) {
 +		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 +		prb_fill_curr_block(curr, pkc, pbd, len);
 +		return (void *)curr;
 +	}
 +
 +	/*
 +	 * No free blocks are available. user-space hasn't caught up yet.
 +	 * Queue was just frozen and now this packet will get dropped.
 +	 */
 +	return NULL;
 +}
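
The offsets written by prb_fill_curr_block() above (tp_next_offset per packet,
plus the block's offset_to_first_pkt) are what user-space chases when it walks
a retired block. A sketch of the hypothetical walk_block() helper referenced
earlier, under the same assumptions:

	/* Visit every packet in a block the kernel has retired to
	 * user-space. handle_frame() is an assumed per-packet callback,
	 * not part of the kernel API.
	 */
	static void walk_block(struct tpacket_block_desc *pbd)
	{
		struct tpacket3_hdr *ppd = (struct tpacket3_hdr *)
			((char *)pbd + pbd->hdr.bh1.offset_to_first_pkt);
		unsigned int i;

		for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
			/* Frame bytes start tp_mac bytes into the header */
			handle_frame((char *)ppd + ppd->tp_mac,
				     ppd->tp_snaplen);
			ppd = (struct tpacket3_hdr *)
				((char *)ppd + ppd->tp_next_offset);
		}
	}
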
 +
 +static inline void *packet_current_rx_frame(struct packet_sock *po,
 +					    struct sk_buff *skb,
 +					    int status, unsigned int len)
 +{
 +	char *curr = NULL;
 +	switch (po->tp_version) {
 +	case TPACKET_V1:
 +	case TPACKET_V2:
 +		curr = packet_lookup_frame(po, &po->rx_ring,
 +					po->rx_ring.head, status);
 +		return curr;
 +	case TPACKET_V3:
 +		return __packet_lookup_frame_in_block(po, skb, status, len);
 +	default:
 +		WARN(1, "TPACKET version not supported\n");
 +		BUG();
 +		return NULL;
 +	}
 +}
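
The per-version dispatch above is keyed off po->tp_version, which user-space
selects with the PACKET_VERSION socket option before any ring exists. A
minimal sketch:

	/* Open an AF_PACKET socket and request the V3 ring format.
	 * Assumes <sys/socket.h>, <linux/if_packet.h>,
	 * <linux/if_ether.h> and <arpa/inet.h>.
	 */
	static int open_v3_socket(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		int version = TPACKET_V3;

		if (fd < 0)
			return -1;
		/* Must precede PACKET_RX_RING: packet_set_ring() reads
		 * po->tp_version to pick the request layout.
		 */
		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
			       &version, sizeof(version)) < 0)
			return -1;
		return fd;
	}
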
 +
 +static inline void *prb_lookup_block(struct packet_sock *po,
 +				     struct packet_ring_buffer *rb,
 +				     unsigned int previous,
 +				     int status)
 +{
 +	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 +	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
 +
 +	if (status != BLOCK_STATUS(pbd))
 +		return NULL;
 +	return pbd;
 +}
 +
 +static inline int prb_previous_blk_num(struct packet_ring_buffer *rb)
 +{
 +	unsigned int prev;
 +	if (rb->prb_bdqc.kactive_blk_num)
 +		prev = rb->prb_bdqc.kactive_blk_num-1;
 +	else
 +		prev = rb->prb_bdqc.knum_blocks-1;
 +	return prev;
 +}
 +
 +/* Assumes caller has held the rx_queue.lock */
 +static inline void *__prb_previous_block(struct packet_sock *po,
 +					 struct packet_ring_buffer *rb,
 +					 int status)
 +{
 +	unsigned int previous = prb_previous_blk_num(rb);
 +	return prb_lookup_block(po, rb, previous, status);
 +}
 +
 +static inline void *packet_previous_rx_frame(struct packet_sock *po,
 +					     struct packet_ring_buffer *rb,
 +					     int status)
 +{
 +	if (po->tp_version <= TPACKET_V2)
 +		return packet_previous_frame(po, rb, status);
 +
 +	return __prb_previous_block(po, rb, status);
 +}
 +
 +static inline void packet_increment_rx_head(struct packet_sock *po,
 +					    struct packet_ring_buffer *rb)
 +{
 +	switch (po->tp_version) {
 +	case TPACKET_V1:
 +	case TPACKET_V2:
 +		return packet_increment_head(rb);
 +	case TPACKET_V3:
 +	default:
 +		WARN(1, "TPACKET version not supported.\n");
 +		BUG();
 +		return;
 +	}
 +}
 +
  static inline void *packet_previous_frame(struct packet_sock *po,
  		struct packet_ring_buffer *rb,
  		int status)
@@@ -1720,7 -961,10 +1720,10 @@@ static int packet_rcv(struct sk_buff *s
  	return 0;
  
  drop_n_acct:
- 	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+ 	spin_lock(&sk->sk_receive_queue.lock);
+ 	po->stats.tp_drops++;
+ 	atomic_inc(&sk->sk_drops);
+ 	spin_unlock(&sk->sk_receive_queue.lock);
  
  drop_n_restore:
  	if (skb_head != skb->data && skb_shared(skb)) {
@@@ -1741,13 -985,12 +1744,13 @@@ static int tpacket_rcv(struct sk_buff *
  	union {
  		struct tpacket_hdr *h1;
  		struct tpacket2_hdr *h2;
 +		struct tpacket3_hdr *h3;
  		void *raw;
  	} h;
  	u8 *skb_head = skb->data;
  	int skb_len = skb->len;
  	unsigned int snaplen, res;
 -	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
 +	unsigned long status = TP_STATUS_USER;
  	unsigned short macoff, netoff, hdrlen;
  	struct sk_buff *copy_skb = NULL;
  	struct timeval tv;
@@@ -1793,46 -1036,37 +1796,46 @@@
  			po->tp_reserve;
  		macoff = netoff - maclen;
  	}
 -
 -	if (macoff + snaplen > po->rx_ring.frame_size) {
 -		if (po->copy_thresh &&
 -		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
 -		    (unsigned)sk->sk_rcvbuf) {
 -			if (skb_shared(skb)) {
 -				copy_skb = skb_clone(skb, GFP_ATOMIC);
 -			} else {
 -				copy_skb = skb_get(skb);
 -				skb_head = skb->data;
 +	if (po->tp_version <= TPACKET_V2) {
 +		if (macoff + snaplen > po->rx_ring.frame_size) {
 +			if (po->copy_thresh &&
 +				atomic_read(&sk->sk_rmem_alloc) + skb->truesize
 +				< (unsigned)sk->sk_rcvbuf) {
 +				if (skb_shared(skb)) {
 +					copy_skb = skb_clone(skb, GFP_ATOMIC);
 +				} else {
 +					copy_skb = skb_get(skb);
 +					skb_head = skb->data;
 +				}
 +				if (copy_skb)
 +					skb_set_owner_r(copy_skb, sk);
  			}
 -			if (copy_skb)
 -				skb_set_owner_r(copy_skb, sk);
 +			snaplen = po->rx_ring.frame_size - macoff;
 +			if ((int)snaplen < 0)
 +				snaplen = 0;
  		}
 -		snaplen = po->rx_ring.frame_size - macoff;
 -		if ((int)snaplen < 0)
 -			snaplen = 0;
  	}
 -
  	spin_lock(&sk->sk_receive_queue.lock);
 -	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
 +	h.raw = packet_current_rx_frame(po, skb,
 +					TP_STATUS_KERNEL, (macoff+snaplen));
  	if (!h.raw)
  		goto ring_is_full;
 -	packet_increment_head(&po->rx_ring);
 +	if (po->tp_version <= TPACKET_V2) {
 +		packet_increment_rx_head(po, &po->rx_ring);
 +		/*
 +		 * LOSING will be reported until the stats are read,
 +		 * because the counter is COR - Clear On Read.
 +		 * This is done for V1/V2 only, as V3 doesn't need it
 +		 * at the packet level.
 +		 */
 +		if (po->stats.tp_drops)
 +			status |= TP_STATUS_LOSING;
 +	}
  	po->stats.tp_packets++;
  	if (copy_skb) {
  		status |= TP_STATUS_COPY;
  		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
  	}
 -	if (!po->stats.tp_drops)
 -		status &= ~TP_STATUS_LOSING;
  	spin_unlock(&sk->sk_receive_queue.lock);
  
  	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
@@@ -1883,29 -1117,6 +1886,29 @@@
  		h.h2->tp_padding = 0;
  		hdrlen = sizeof(*h.h2);
  		break;
 +	case TPACKET_V3:
 +		/* tp_next_offset and vlan are already populated above,
 +		 * so don't clear those fields here.
 +		 */
 +		h.h3->tp_status |= status;
 +		h.h3->tp_len = skb->len;
 +		h.h3->tp_snaplen = snaplen;
 +		h.h3->tp_mac = macoff;
 +		h.h3->tp_net = netoff;
 +		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
 +				&& shhwtstamps->syststamp.tv64)
 +			ts = ktime_to_timespec(shhwtstamps->syststamp);
 +		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
 +				&& shhwtstamps->hwtstamp.tv64)
 +			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
 +		else if (skb->tstamp.tv64)
 +			ts = ktime_to_timespec(skb->tstamp);
 +		else
 +			getnstimeofday(&ts);
 +		h.h3->tp_sec  = ts.tv_sec;
 +		h.h3->tp_nsec = ts.tv_nsec;
 +		hdrlen = sizeof(*h.h3);
 +		break;
  	default:
  		BUG();
  	}
@@@ -1926,19 -1137,13 +1929,19 @@@
  	{
  		u8 *start, *end;
  
 -		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
 -		for (start = h.raw; start < end; start += PAGE_SIZE)
 -			flush_dcache_page(pgv_to_page(start));
 +		if (po->tp_version <= TPACKET_V2) {
 +			end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
 +				+ macoff + snaplen);
 +			for (start = h.raw; start < end; start += PAGE_SIZE)
 +				flush_dcache_page(pgv_to_page(start));
 +		}
  		smp_wmb();
  	}
  #endif
 -	__packet_set_status(po, h.raw, status);
 +	if (po->tp_version <= TPACKET_V2)
 +		__packet_set_status(po, h.raw, status);
 +	else
 +		prb_clear_blk_fill_status(&po->rx_ring);
  
  	sk->sk_data_ready(sk, 0);
  
@@@ -2429,7 -1634,7 +2432,7 @@@ static int packet_release(struct socke
  	struct sock *sk = sock->sk;
  	struct packet_sock *po;
  	struct net *net;
 -	struct tpacket_req req;
 +	union tpacket_req_u req_u;
  
  	if (!sk)
  		return 0;
@@@ -2452,13 -1657,13 +2455,13 @@@
  
  	packet_flush_mclist(sk);
  
 -	memset(&req, 0, sizeof(req));
 +	memset(&req_u, 0, sizeof(req_u));
  
  	if (po->rx_ring.pg_vec)
 -		packet_set_ring(sk, &req, 1, 0);
 +		packet_set_ring(sk, &req_u, 1, 0);
  
  	if (po->tx_ring.pg_vec)
 -		packet_set_ring(sk, &req, 1, 1);
 +		packet_set_ring(sk, &req_u, 1, 1);
  
  	fanout_release(sk);
  
@@@ -3078,27 -2283,15 +3081,27 @@@ packet_setsockopt(struct socket *sock, 
  	case PACKET_RX_RING:
  	case PACKET_TX_RING:
  	{
 -		struct tpacket_req req;
 +		union tpacket_req_u req_u;
 +		int len;
  
 -		if (optlen < sizeof(req))
 +		switch (po->tp_version) {
 +		case TPACKET_V1:
 +		case TPACKET_V2:
 +			len = sizeof(req_u.req);
 +			break;
 +		case TPACKET_V3:
 +		default:
 +			len = sizeof(req_u.req3);
 +			break;
 +		}
 +		if (optlen < len)
  			return -EINVAL;
  		if (pkt_sk(sk)->has_vnet_hdr)
  			return -EINVAL;
 -		if (copy_from_user(&req, optval, sizeof(req)))
 +		if (copy_from_user(&req_u.req, optval, len))
  			return -EFAULT;
 -		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
 +		return packet_set_ring(sk, &req_u, 0,
 +			optname == PACKET_TX_RING);
  	}
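
Per the length switch above, a V3 socket hands in the larger tpacket_req3,
whose extra fields configure the retire-block timer, the sticky per-block priv
area, and the feature-request word consulted by prb_run_all_ft_ops(). A sketch
with arbitrary example sizes, continuing from open_v3_socket() above:

	/* Illustrative geometry only; block/frame sizes are examples. */
	struct tpacket_req3 req3 = {
		.tp_block_size		= 1 << 22,	/* 4 MiB blocks */
		.tp_block_nr		= 64,
		.tp_frame_size		= 1 << 11,
		.tp_frame_nr		= ((1 << 22) / (1 << 11)) * 64,
		.tp_retire_blk_tov	= 60,	/* ms until forced retire */
		.tp_sizeof_priv		= 0,
		.tp_feature_req_word	= TP_FT_REQ_FILL_RXHASH,
	};

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
		       &req3, sizeof(req3)) < 0)
		perror("PACKET_RX_RING");
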
  	case PACKET_COPY_THRESH:
  	{
@@@ -3125,7 -2318,6 +3128,7 @@@
  		switch (val) {
  		case TPACKET_V1:
  		case TPACKET_V2:
 +		case TPACKET_V3:
  			po->tp_version = val;
  			return 0;
  		default:
@@@ -3235,7 -2427,6 +3238,7 @@@ static int packet_getsockopt(struct soc
  	struct packet_sock *po = pkt_sk(sk);
  	void *data;
  	struct tpacket_stats st;
 +	union tpacket_stats_u st_u;
  
  	if (level != SOL_PACKET)
  		return -ENOPROTOOPT;
@@@ -3248,27 -2439,15 +3251,27 @@@
  
  	switch (optname) {
  	case PACKET_STATISTICS:
 -		if (len > sizeof(struct tpacket_stats))
 -			len = sizeof(struct tpacket_stats);
 +		if (po->tp_version == TPACKET_V3) {
 +			len = sizeof(struct tpacket_stats_v3);
 +		} else {
 +			if (len > sizeof(struct tpacket_stats))
 +				len = sizeof(struct tpacket_stats);
 +		}
  		spin_lock_bh(&sk->sk_receive_queue.lock);
 -		st = po->stats;
 +		if (po->tp_version == TPACKET_V3) {
 +			memcpy(&st_u.stats3, &po->stats,
 +			       sizeof(struct tpacket_stats));
 +			st_u.stats3.tp_freeze_q_cnt =
 +				po->stats_u.stats3.tp_freeze_q_cnt;
 +			st_u.stats3.tp_packets += po->stats.tp_drops;
 +			data = &st_u.stats3;
 +		} else {
 +			st = po->stats;
 +			st.tp_packets += st.tp_drops;
 +			data = &st;
 +		}
  		memset(&po->stats, 0, sizeof(st));
  		spin_unlock_bh(&sk->sk_receive_queue.lock);
 -		st.tp_packets += st.tp_drops;
 -
 -		data = &st;
  		break;
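
Correspondingly, a V3 reader fetches the larger stats structure, which carries
the tp_freeze_q_cnt incremented in prb_freeze_queue(). Note the counters are
clear-on-read, as the hunk above preserves. A sketch, with fd being the V3
socket from the earlier sketches:

	/* Assumes <stdio.h>. Counters reset on each read. */
	struct tpacket_stats_v3 st3;
	socklen_t optlen = sizeof(st3);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS,
		       &st3, &optlen) == 0)
		fprintf(stderr, "pkts=%u drops=%u freezes=%u\n",
			st3.tp_packets, st3.tp_drops,
			st3.tp_freeze_q_cnt);
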
  	case PACKET_AUXDATA:
  		if (len > sizeof(int))
@@@ -3309,9 -2488,6 +3312,9 @@@
  		case TPACKET_V2:
  			val = sizeof(struct tpacket2_hdr);
  			break;
 +		case TPACKET_V3:
 +			val = sizeof(struct tpacket3_hdr);
 +			break;
  		default:
  			return -EINVAL;
  		}
@@@ -3468,8 -2644,7 +3471,8 @@@ static unsigned int packet_poll(struct 
  
  	spin_lock_bh(&sk->sk_receive_queue.lock);
  	if (po->rx_ring.pg_vec) {
 -		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
 +		if (!packet_previous_rx_frame(po, &po->rx_ring,
 +			TP_STATUS_KERNEL))
  			mask |= POLLIN | POLLRDNORM;
  	}
  	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@@ -3588,7 -2763,7 +3591,7 @@@ out_free_pgvec
  	goto out;
  }
  
 -static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 +static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
  		int closing, int tx_ring)
  {
  	struct pgv *pg_vec = NULL;
@@@ -3597,15 -2772,7 +3600,15 @@@
  	struct packet_ring_buffer *rb;
  	struct sk_buff_head *rb_queue;
  	__be16 num;
 -	int err;
 +	int err = -EINVAL;
 +	/* Added to keep code churn minimal */
 +	struct tpacket_req *req = &req_u->req;
 +
 +	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 +	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 +		WARN(1, "Tx-ring is not supported.\n");
 +		goto out;
 +	}
  
  	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
  	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@@ -3631,9 -2798,6 +3634,9 @@@
  		case TPACKET_V2:
  			po->tp_hdrlen = TPACKET2_HDRLEN;
  			break;
 +		case TPACKET_V3:
 +			po->tp_hdrlen = TPACKET3_HDRLEN;
 +			break;
  		}
  
  		err = -EINVAL;
@@@ -3659,17 -2823,6 +3662,17 @@@
  		pg_vec = alloc_pg_vec(req, order);
  		if (unlikely(!pg_vec))
  			goto out;
 +		switch (po->tp_version) {
 +		case TPACKET_V3:
 +			/* Transmit path is not supported. We checked
 +			 * it above, but just being paranoid.
 +			 */
 +			if (!tx_ring)
 +				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
 +			break;
 +		default:
 +			break;
 +		}
  	}
  	/* Done */
  	else {
@@@ -3722,11 -2875,7 +3725,11 @@@
  		register_prot_hook(sk);
  	}
  	spin_unlock(&po->bind_lock);
 -
 +	if (closing && (po->tp_version > TPACKET_V2)) {
 +		/* Because we don't support block-based V3 on tx-ring */
 +		if (!tx_ring)
 +			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
 +	}
  	release_sock(sk);
  
  	if (pg_vec)

-- 
LinuxNextTracking

