The following commit has been merged in the master branch:
commit 49b21301f6393c64784d45b8eb9f17985887a9e9
Merge: 6ea2c10707dc50aa7b038f3ae9eb963f79664c9e 8b1857357acd919b9a7fa391afbea30123fdfaec
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Oct 25 17:17:57 2011 +1100
Merge remote-tracking branch 'net-next/master'
Conflicts:
	arch/powerpc/configs/40x/hcu4_defconfig
	drivers/s390/cio/qdio_main.c
diff --combined Documentation/feature-removal-schedule.txt
index ead08f1,d5ac362..7c799fc
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@@ -495,6 -495,29 +495,6 @@@ Who: Jean Delvare <khali@linux-fr.org
----------------------------
-What:	Support for UVCIOC_CTRL_ADD in the uvcvideo driver
-When:	3.2
-Why:	The information passed to the driver by this ioctl is now queried
-	dynamically from the device.
-Who:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
------------------------------
-
-What:	Support for UVCIOC_CTRL_MAP_OLD in the uvcvideo driver
-When:	3.2
-Why:	Used only by applications compiled against older driver versions.
-	Superseded by UVCIOC_CTRL_MAP which supports V4L2 menu controls.
-Who:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
------------------------------
-
-What:	Support for UVCIOC_CTRL_GET and UVCIOC_CTRL_SET in the uvcvideo driver
-When:	3.2
-Why:	Superseded by the UVCIOC_CTRL_QUERY ioctl.
-Who:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
------------------------------
-
 What:	Support for driver specific ioctls in the pwc driver (everything
 	defined in media/pwc-ioctl.h)
 When:	3.3
@@@ -571,9 -594,18 +571,18 @@@ Why: In 3.0, we can now autodetect i
 Who:	Lee, Chun-Yi <jlee@novell.com>
 ----------------------------
+ What:	The XFS nodelaylog mount option
  When:	3.3
  Why:	The delaylog mode that has been the default since 2.6.39 has proven
 	stable, and the old code is in the way of additional improvements in
 	the log code.
  Who:	Christoph Hellwig <hch@lst.de>
+ 
+ ----------------------------
+ 
+ What:	iwlagn alias support
+ When:	3.5
+ Why:	The iwlagn module has been renamed iwlwifi.  The alias will be around
+ 	for backward compatibility for several cycles and then dropped.
+ Who:	Don Fry <donald.h.fry@intel.com>

diff --combined MAINTAINERS
index 0d7036c,5008b08..3cceec1
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -117,20 -117,20 +117,20 @@@ Maintainers List (try to look for most
 M:	Philip Blundell <philb@gnu.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
- F:	drivers/net/3c505*
+ F:	drivers/net/ethernet/i825xx/3c505*
3C59X NETWORK DRIVER M: Steffen Klassert klassert@mathematik.tu-chemnitz.de L: netdev@vger.kernel.org S: Maintained F: Documentation/networking/vortex.txt - F: drivers/net/3c59x.c + F: drivers/net/ethernet/3com/3c59x.c
3CR990 NETWORK DRIVER M: David Dillow dave@thedillows.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/typhoon* + F: drivers/net/ethernet/3com/typhoon*
3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS) M: Adam Radford linuxraid@lsi.com @@@ -156,7 -156,7 +156,7 @@@ M: Realtek linux nic maintainers <nic_s M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/r8169.c + F: drivers/net/ethernet/realtek/r8169.c
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER M: Greg Kroah-Hartman gregkh@suse.de @@@ -170,8 -170,7 +170,7 @@@ F: include/linux/serial_8250. 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.] L: netdev@vger.kernel.org S: Orphan / Obsolete - F: drivers/net/*8390* - F: drivers/net/ax88796.c + F: drivers/net/ethernet/8390/
9P FILE SYSTEM M: Eric Van Hensbergen ericvh@gmail.com @@@ -214,7 -213,7 +213,7 @@@ ACENIC DRIVE M: Jes Sorensen jes@trained-monkey.org L: linux-acenic@sunsite.dk S: Maintained - F: drivers/net/acenic* + F: drivers/net/ethernet/alteon/acenic*
ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER M: Peter Feuerer peter@piie.net @@@ -688,12 -687,6 +687,12 @@@ F: drivers/mtd/nand/bcm_umi_nand. F: drivers/mtd/nand/bcm_umi_bch.c F: drivers/mtd/nand/nand_bcm_umi.h
+ARM/CALXEDA HIGHBANK ARCHITECTURE +M: Rob Herring rob.herring@calxeda.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-highbank/ + ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT M: Anton Vorontsov avorontsov@mvista.com S: Maintained @@@ -752,7 -745,7 +751,7 @@@ L: linux-arm-kernel@lists.infradead.or W: http://www.arm.linux.org.uk/ S: Maintained F: arch/arm/mach-ebsa110/ - F: drivers/net/arm/am79c961a.* + F: drivers/net/ethernet/amd/am79c961a.*
ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6) M: Daniel Ribeiro drwyrm@gmail.com @@@ -793,13 -786,6 +792,13 @@@ L: linux-arm-kernel@lists.infradead.or S: Maintained F: arch/arm/mach-mx5/
+ARM/FREESCALE IMX6 +M: Shawn Guo shawn.guo@linaro.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +T: git git://git.linaro.org/people/shawnguo/linux-2.6.git +F: arch/arm/mach-imx/*imx6* + ARM/GLOMATION GESBC9312SX MACHINE SUPPORT M: Lennert Buytenhek kernel@wantstofly.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -1028,7 -1014,8 +1027,8 @@@ F: arch/arm/include/asm/hardware/ioc. F: arch/arm/include/asm/hardware/iomd.h F: arch/arm/include/asm/hardware/memc.h F: arch/arm/mach-rpc/ - F: drivers/net/arm/ether* + F: drivers/net/ethernet/i825xx/ether1* + F: drivers/net/ethernet/seeq/ether3* F: drivers/scsi/arm/
ARM/SHARK MACHINE SUPPORT @@@ -1097,24 -1084,6 +1097,24 @@@ F: arch/arm/plat-s5p/dev-fimc F: arch/arm/plat-samsung/include/plat/*fimc* F: drivers/media/video/s5p-fimc/
+ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +M: Kamil Debski k.debski@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: arch/arm/plat-s5p/dev-mfc.c +F: drivers/media/video/s5p-mfc/ + +ARM/SAMSUNG S5P SERIES TV SUBSYSTEM SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +M: Tomasz Stanislawski t.stanislaws@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: arch/arm/plat-s5p/dev-tv.c +F: drivers/media/video/s5p-tv/ + ARM/SHMOBILE ARM ARCHITECTURE M: Paul Mundt lethal@linux-sh.org M: Magnus Damm magnus.damm@gmail.com @@@ -1158,7 -1127,7 +1158,7 @@@ F: arch/arm/mach-nuc93x F: drivers/input/keyboard/w90p910_keypad.c F: drivers/input/touchscreen/w90p910_ts.c F: drivers/watchdog/nuc900_wdt.c - F: drivers/net/arm/w90p910_ether.c + F: drivers/net/ethernet/nuvoton/w90p910_ether.c F: drivers/mtd/nand/nuc900_nand.c F: drivers/rtc/rtc-nuc900.c F: drivers/spi/spi_nuc900.c @@@ -1261,7 -1230,7 +1261,7 @@@ F: Documentation/aoe F: drivers/block/aoe/
ATHEROS ATH GENERIC UTILITIES - M: "Luis R. Rodriguez" lrodriguez@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com L: linux-wireless@vger.kernel.org S: Supported F: drivers/net/wireless/ath/* @@@ -1269,7 -1238,7 +1269,7 @@@ ATHEROS ATH5K WIRELESS DRIVER M: Jiri Slaby jirislaby@gmail.com M: Nick Kossifidis mickflemm@gmail.com - M: "Luis R. Rodriguez" lrodriguez@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com M: Bob Copeland me@bobcopeland.com L: linux-wireless@vger.kernel.org L: ath5k-devel@lists.ath5k.org @@@ -1277,11 -1246,19 +1277,19 @@@ W: http://wireless.kernel.org/en/users/ S: Maintained F: drivers/net/wireless/ath/ath5k/
+ ATHEROS ATH6KL WIRELESS DRIVER + M: Kalle Valo kvalo@qca.qualcomm.com + L: linux-wireless@vger.kernel.org + W: http://wireless.kernel.org/en/users/Drivers/ath6kl + T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git + S: Supported + F: drivers/net/wireless/ath/ath6kl/ + ATHEROS ATH9K WIRELESS DRIVER - M: "Luis R. Rodriguez" lrodriguez@atheros.com - M: Jouni Malinen jmalinen@atheros.com - M: Vasanthakumar Thiagarajan vasanth@atheros.com - M: Senthil Balasubramanian senthilkumar@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com + M: Jouni Malinen jouni@qca.qualcomm.com + M: Vasanthakumar Thiagarajan vthiagar@qca.qualcomm.com + M: Senthil Balasubramanian senthilb@qca.qualcomm.com L: linux-wireless@vger.kernel.org L: ath9k-devel@lists.ath9k.org W: http://wireless.kernel.org/en/users/Drivers/ath9k @@@ -1313,7 -1290,7 +1321,7 @@@ L: netdev@vger.kernel.or W: http://sourceforge.net/projects/atl1 W: http://atl1.sourceforge.net S: Maintained - F: drivers/net/atlx/ + F: drivers/net/ethernet/atheros/
ATM M: Chas Williams chas@cmf.nrl.navy.mil @@@ -1353,7 -1330,7 +1361,7 @@@ F: include/video/atmel_lcdc. ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com S: Supported - F: drivers/net/macb.* + F: drivers/net/ethernet/cadence/
ATMEL SPI DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com @@@ -1476,7 -1453,7 +1484,7 @@@ BLACKFIN EMAC DRIVE L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported - F: drivers/net/bfin_mac.* + F: drivers/net/ethernet/adi/
BLACKFIN RTC DRIVER M: Mike Frysinger vapier.adi@gmail.com @@@ -1557,27 -1534,27 +1565,27 @@@ BROADCOM B44 10/100 ETHERNET DRIVE M: Gary Zambrano zambrano@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/b44.* + F: drivers/net/ethernet/broadcom/b44.*
BROADCOM BNX2 GIGABIT ETHERNET DRIVER M: Michael Chan mchan@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bnx2.* - F: drivers/net/bnx2_* + F: drivers/net/ethernet/broadcom/bnx2.* + F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER M: Eilon Greenstein eilong@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bnx2x/ + F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Matt Carlson mcarlson@broadcom.com M: Michael Chan mchan@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/tg3.* + F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER M: Brett Rudley brudley@broadcom.com @@@ -1606,7 -1583,7 +1614,7 @@@ BROCADE BNA 10 GIGABIT ETHERNET DRIVE M: Rasesh Mody rmody@brocade.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bna/ + F: drivers/net/ethernet/brocade/bna/
BSG (block layer generic sg v4 driver) M: FUJITA Tomonori fujita.tomonori@lab.ntt.co.jp @@@ -1655,14 -1632,6 +1663,14 @@@ T: git git://git.alsa-project.org/alsa- S: Maintained F: sound/pci/oxygen/
+C6X ARCHITECTURE +M: Mark Salter msalter@redhat.com +M: Aurelien Jacquiot a-jacquiot@ti.com +L: linux-c6x-dev@linux-c6x.org +W: http://www.linux-c6x.org/wiki/index.php/Main_Page +S: Maintained +F: arch/c6x/ + CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS M: David Howells dhowells@redhat.com L: linux-cachefs@redhat.com @@@ -1702,7 -1671,7 +1710,7 @@@ CAN NETWORK LAYE M: Oliver Hartkopp socketcan@hartkopp.net M: Oliver Hartkopp oliver.hartkopp@volkswagen.de M: Urs Thuermann urs.thuermann@volkswagen.de - L: socketcan-core@lists.berlios.de (subscribers-only) + L: linux-can@vger.kernel.org L: netdev@vger.kernel.org W: http://developer.berlios.de/projects/socketcan/ S: Maintained @@@ -1714,7 -1683,7 +1722,7 @@@ F: include/linux/can/raw.
CAN NETWORK DRIVERS M: Wolfgang Grandegger wg@grandegger.com - L: socketcan-core@lists.berlios.de (subscribers-only) + L: linux-can@vger.kernel.org L: netdev@vger.kernel.org W: http://developer.berlios.de/projects/socketcan/ S: Maintained @@@ -1798,13 -1767,13 +1806,13 @@@ M: Christian Benvenuti <benve@cisco.com M: Roopa Prabhu roprabhu@cisco.com M: David Wang dwang2@cisco.com S: Supported - F: drivers/net/enic/ + F: drivers/net/ethernet/cisco/enic/
CIRRUS LOGIC EP93XX ETHERNET DRIVER M: Hartley Sweeten hsweeten@visionengravers.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/arm/ep93xx_eth.c + F: drivers/net/ethernet/cirrus/ep93xx_eth.c
CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER M: Lennert Buytenhek kernel@wantstofly.org @@@ -1944,7 -1913,7 +1952,7 @@@ CPMAC ETHERNET DRIVE M: Florian Fainelli florian@openwrt.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/cpmac.c + F: drivers/net/ethernet/ti/cpmac.c
CPU FREQUENCY DRIVERS M: Dave Jones davej@redhat.com @@@ -2031,7 -2000,7 +2039,7 @@@ M: Divy Le Ray <divy@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb3/ + F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise swise@chelsio.com @@@ -2045,7 -2014,7 +2053,7 @@@ M: Dimitris Michailidis <dm@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb4/ + F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise swise@chelsio.com @@@ -2059,14 -2028,14 +2067,14 @@@ M: Casey Leedom <leedom@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb4vf/ + F: drivers/net/ethernet/chelsio/cxgb4vf/
STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro peppe.cavallaro@st.com L: netdev@vger.kernel.org W: http://www.stlinux.com S: Supported - F: drivers/net/stmmac/ + F: drivers/net/ethernet/stmicro/stmmac/
CYBERPRO FB DRIVER M: Russell King linux@arm.linux.org.uk @@@ -2110,7 -2079,7 +2118,7 @@@ DAVICOM FAST ETHERNET (DMFE) NETWORK DR L: netdev@vger.kernel.org S: Orphan F: Documentation/networking/dmfe.txt - F: drivers/net/tulip/dmfe.c + F: drivers/net/ethernet/tulip/dmfe.c
DC390/AM53C974 SCSI driver M: Kurt Garloff garloff@suse.de @@@ -2149,7 -2118,7 +2157,7 @@@ F: net/decnet DEFXX FDDI NETWORK DRIVER M: "Maciej W. Rozycki" macro@linux-mips.org S: Maintained - F: drivers/net/defxx.* + F: drivers/net/fddi/defxx.*
DELL LAPTOP DRIVER M: Matthew Garrett mjg59@srcf.ucam.org @@@ -2499,10 -2468,10 +2507,10 @@@ S: Supporte F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER -M: Breno Leitao leitao@linux.vnet.ibm.com +M: Thadeu Lima de Souza Cascardo cascardo@linux.vnet.ibm.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ehea/ + F: drivers/net/ethernet/ibm/ehea/
EMBEDDED LINUX M: Paul Gortmaker paul.gortmaker@windriver.com @@@ -2547,7 -2516,7 +2555,7 @@@ ETHEREXPRESS-16 NETWORK DRIVE M: Philip Blundell philb@gnu.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/eexpress.* + F: drivers/net/ethernet/i825xx/eexpress.*
ETHERNET BRIDGE M: Stephen Hemminger shemminger@linux-foundation.org @@@ -2561,7 -2530,7 +2569,7 @@@ F: net/bridge ETHERTEAM 16I DRIVER M: Mika Kuoppala miku@iki.fi S: Maintained - F: drivers/net/eth16i.c + F: drivers/net/ethernet/fujitsu/eth16i.c
EXT2 FILE SYSTEM M: Jan Kara jack@suse.cz @@@ -2725,7 -2694,7 +2733,7 @@@ M: Vitaly Bordug <vbordug@ru.mvista.com L: linuxppc-dev@lists.ozlabs.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/fs_enet/ + F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY @@@ -2747,7 -2716,7 +2755,7 @@@ M: Li Yang <leoli@freescale.com L: netdev@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained - F: drivers/net/ucc_geth* + F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@freescale.com @@@ -3085,6 -3054,7 +3093,7 @@@ S: Maintaine F: include/linux/hippidevice.h F: include/linux/if_hippi.h F: net/802/hippi.c + F: drivers/net/hippi/
HOST AP DRIVER M: Jouni Malinen j@w1.fi @@@ -3102,7 -3072,7 +3111,7 @@@ F: drivers/platform/x86/tc1100-wmi. HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series M: Jaroslav Kysela perex@perex.cz S: Maintained - F: drivers/net/hp100.* + F: drivers/net/ethernet/hp/hp100.*
HPET: High Precision Event Timers driver M: Clemens Ladisch clemens@ladisch.de @@@ -3180,7 -3150,8 +3189,7 @@@ IA64 (Itanium) PLATFOR M: Tony Luck tony.luck@intel.com M: Fenghua Yu fenghua.yu@intel.com L: linux-ia64@vger.kernel.org -W: http://www.ia64-linux.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git S: Maintained F: arch/ia64/
@@@ -3199,7 -3170,7 +3208,7 @@@ IBM Power Virtual Ethernet Device Drive M: Santiago Leon santil@linux.vnet.ibm.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/ibmveth.* + F: drivers/net/ethernet/ibm/ibmveth.*
IBM ServeRAID RAID DRIVER P: Jack Hammer @@@ -3351,7 -3322,7 +3360,7 @@@ M: David Woodhouse <dwmw2@infradead.org L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/iommu-2.6.git S: Supported -F: drivers/pci/intel-iommu.c +F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER @@@ -3366,7 -3337,7 +3375,7 @@@ F: arch/arm/mach-ixp4xx/include/mach/qm F: arch/arm/mach-ixp4xx/include/mach/npe.h F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c F: arch/arm/mach-ixp4xx/ixp4xx_npe.c - F: drivers/net/arm/ixp4xx_eth.c + F: drivers/net/ethernet/xscale/ixp4xx_eth.c F: drivers/net/wan/ixp4xx_hss.c
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT @@@ -3378,7 -3349,7 +3387,7 @@@ INTEL IXP2000 ETHERNET DRIVE M: Lennert Buytenhek kernel@wantstofly.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ixp2000/ + F: drivers/net/ethernet/xscale/ixp2000/
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) M: Jeff Kirsher jeffrey.t.kirsher@intel.com @@@ -3387,13 -3358,13 +3396,13 @@@ M: Bruce Allan <bruce.w.allan@intel.com M: Carolyn Wyborny carolyn.wyborny@intel.com M: Don Skidmore donald.c.skidmore@intel.com M: Greg Rose gregory.v.rose@intel.com - M: PJ Waskiewicz peter.p.waskiewicz.jr@intel.com + M: Peter P Waskiewicz Jr peter.p.waskiewicz.jr@intel.com M: Alex Duyck alexander.h.duyck@intel.com M: John Ronciak john.ronciak@intel.com L: e1000-devel@lists.sourceforge.net W: http://e1000.sourceforge.net/ - T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git - T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git S: Supported F: Documentation/networking/e100.txt F: Documentation/networking/e1000.txt @@@ -3403,14 -3374,7 +3412,7 @@@ F: Documentation/networking/igbvf.tx F: Documentation/networking/ixgb.txt F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt - F: drivers/net/e100.c - F: drivers/net/e1000/ - F: drivers/net/e1000e/ - F: drivers/net/igb/ - F: drivers/net/igbvf/ - F: drivers/net/ixgb/ - F: drivers/net/ixgbe/ - F: drivers/net/ixgbevf/ + F: drivers/net/ethernet/intel/
INTEL MRST PMU DRIVER M: Len Brown len.brown@intel.com @@@ -3462,7 -3426,7 +3464,7 @@@ M: Wey-Yi Guy <wey-yi.w.guy@intel.com M: Intel Linux Wireless ilw@linux.intel.com L: linux-wireless@vger.kernel.org W: http://intellinuxwireless.org - T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git S: Supported F: drivers/net/wireless/iwlwifi/
@@@ -3478,7 -3442,7 +3480,7 @@@ IOC3 ETHERNET DRIVE M: Ralf Baechle ralf@linux-mips.org L: linux-mips@linux-mips.org S: Maintained - F: drivers/net/ioc3-eth.c + F: drivers/net/ethernet/sgi/ioc3-eth.c
IOC3 SERIAL DRIVER M: Pat Gefre pfg@sgi.com @@@ -3496,7 -3460,7 +3498,7 @@@ M: Francois Romieu <romieu@fr.zoreil.co M: Sorbica Shieh sorbica@icplus.com.tw L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ipg.* + F: drivers/net/ethernet/icplus/ipg.*
IPATH DRIVER M: Mike Marciniszyn infinipath@qlogic.com @@@ -3644,7 -3608,7 +3646,7 @@@ JME NETWORK DRIVE M: Guo-Fu Tseng cooldavid@cooldavid.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/jme.* + F: drivers/net/ethernet/jme.*
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) M: David Woodhouse dwmw2@infradead.org @@@ -4066,7 -4030,6 +4068,7 @@@ F: fs/partitions/ldm.
LogFS M: Joern Engel joern@logfs.org +M: Prasad Joshi prasadjoshi.linux@gmail.com L: logfs@logfs.org W: logfs.org S: Maintained @@@ -4176,7 -4139,7 +4178,7 @@@ MARVELL MV643XX ETHERNET DRIVE M: Lennert Buytenhek buytenh@wantstofly.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/mv643xx_eth.* + F: drivers/net/ethernet/marvell/mv643xx_eth.* F: include/linux/mv643xx.h
MARVELL MWIFIEX WIRELESS DRIVER @@@ -4390,12 -4353,12 +4392,12 @@@ M: Andrew Gallatin <gallatin@myri.com L: netdev@vger.kernel.org W: http://www.myri.com/scs/download-Myri10GE.html S: Supported - F: drivers/net/myri10ge/ + F: drivers/net/ethernet/myricom/myri10ge/
NATSEMI ETHERNET DRIVER (DP8381x) M: Tim Hockin thockin@hockin.org S: Maintained - F: drivers/net/natsemi.c + F: drivers/net/ethernet/natsemi/natsemi.c
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER M: Daniel Mack zonque@gmail.com @@@ -4435,9 -4398,8 +4437,8 @@@ W: http://trac.neterion.com/cgi-bin/tra W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous S: Supported F: Documentation/networking/s2io.txt - F: drivers/net/s2io* F: Documentation/networking/vxge.txt - F: drivers/net/vxge/ + F: drivers/net/ethernet/neterion/
NETFILTER/IPTABLES/IPCHAINS P: Rusty Russell @@@ -4551,11 -4513,23 +4552,23 @@@ F: include/linux/if_ F: include/linux/*device.h
NETXEN (1/10) GbE SUPPORT - M: Amit Kumar Salecha amit.salecha@qlogic.com + M: Sony Chacko sony.chacko@qlogic.com + M: Rajesh Borundia rajesh.borundia@qlogic.com L: netdev@vger.kernel.org W: http://www.qlogic.com S: Supported - F: drivers/net/netxen/ + F: drivers/net/ethernet/qlogic/netxen/ + + NFC SUBSYSTEM + M: Lauro Ramos Venancio lauro.venancio@openbossa.org + M: Aloisio Almeida Jr aloisio.almeida@openbossa.org + M: Samuel Ortiz sameo@linux.intel.com + L: linux-wireless@vger.kernel.org + S: Maintained + F: net/nfc/ + F: include/linux/nfc.h + F: include/net/nfc/ + F: drivers/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS M: Trond Myklebust Trond.Myklebust@netapp.com @@@ -4576,7 -4550,7 +4589,7 @@@ M: Jan-Pascal van Best <janpascal@vanbe M: Andreas Mohr andi@lisas.de L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ni5010.* + F: drivers/net/ethernet/racal/ni5010.*
NILFS2 FILESYSTEM M: KONISHI Ryusuke konishi.ryusuke@lab.ntt.co.jp @@@ -4842,7 -4816,7 +4855,7 @@@ PA SEMI ETHERNET DRIVE M: Olof Johansson olof@lixom.net L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pasemi_mac.* + F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER M: Olof Johansson olof@lixom.net @@@ -4989,7 -4963,7 +5002,7 @@@ PCNET32 NETWORK DRIVE M: Don Fry pcnet32@frontier.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pcnet32.c + F: drivers/net/ethernet/amd/pcnet32.c
PCRYPT PARALLEL CRYPTO ENGINE M: Steffen Klassert steffen.klassert@secunet.com @@@ -5121,7 -5095,7 +5134,7 @@@ PPP PROTOCOL DRIVERS AND COMPRESSOR M: Paul Mackerras paulus@samba.org L: linux-ppp@vger.kernel.org S: Maintained - F: drivers/net/ppp_* + F: drivers/net/ppp/ppp_*
PPP OVER ATM (RFC 2364) M: Mitchell Blank Jr mitch@sfgoth.com @@@ -5132,8 -5106,8 +5145,8 @@@ F: include/linux/atmppp. PPP OVER ETHERNET M: Michal Ostrowski mostrows@earthlink.net S: Maintained - F: drivers/net/pppoe.c - F: drivers/net/pppox.c + F: drivers/net/ppp/pppoe.c + F: drivers/net/ppp/pppox.c
PPP OVER L2TP M: James Chapman jchapman@katalix.com @@@ -5154,7 -5128,7 +5167,7 @@@ PPTP DRIVE M: Dmitry Kozlov xeb@mail.ru L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pptp.c + F: drivers/net/ppp/pptp.c W: http://sourceforge.net/projects/accel-pptp
PREEMPTIBLE KERNEL @@@ -5183,7 -5157,7 +5196,7 @@@ M: Geoff Levand <geoff@infradead.org L: netdev@vger.kernel.org L: cbe-oss-dev@lists.ozlabs.org S: Maintained - F: drivers/net/ps3_gelic_net.* + F: drivers/net/ethernet/toshiba/ps3_gelic_net.*
PS3 PLATFORM SUPPORT M: Geoff Levand geoff@infradead.org @@@ -5301,23 -5275,24 +5314,24 @@@ M: linux-driver@qlogic.co L: netdev@vger.kernel.org S: Supported F: Documentation/networking/LICENSE.qla3xxx - F: drivers/net/qla3xxx.* + F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER - M: Amit Kumar Salecha amit.salecha@qlogic.com M: Anirban Chakraborty anirban.chakraborty@qlogic.com + M: Sony Chacko sony.chacko@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/qlcnic/ + F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER + M: Anirban Chakraborty anirban.chakraborty@qlogic.com M: Jitendra Kalsaria jitendra.kalsaria@qlogic.com M: Ron Mercer ron.mercer@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/qlge/ + F: drivers/net/ethernet/qlogic/qlge/
QNX4 FILESYSTEM M: Anders Larsen al@alarsen.net @@@ -5327,12 -5302,6 +5341,12 @@@ F: fs/qnx4 F: include/linux/qnx4_fs.h F: include/linux/qnxtypes.h
+QUALCOMM HEXAGON ARCHITECTURE +M: Richard Kuo rkuo@codeaurora.org +L: linux-hexagon@vger.kernel.org +S: Supported +F: arch/hexagon/ + RADOS BLOCK DEVICE (RBD) F: include/linux/qnxtypes.h M: Yehuda Sadeh yehuda@hq.newdream.net @@@ -5405,7 -5374,7 +5419,7 @@@ RDC R6040 FAST ETHERNET DRIVE M: Florian Fainelli florian@openwrt.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/r6040.c + F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS M: Andy Grover andy.grover@oracle.com @@@ -5809,7 -5778,7 +5823,7 @@@ M: Ajit Khaparde <ajit.khaparde@emulex. L: netdev@vger.kernel.org W: http://www.emulex.com S: Supported - F: drivers/net/benet/ + F: drivers/net/ethernet/emulex/benet/
SFC NETWORK DRIVER M: Solarflare linux maintainers linux-net-drivers@solarflare.com @@@ -5817,7 -5786,7 +5831,7 @@@ M: Steve Hodgson <shodgson@solarflare.c M: Ben Hutchings bhutchings@solarflare.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/sfc/ + F: drivers/net/ethernet/sfc/
SGI GRU DRIVER M: Jack Steiner steiner@sgi.com @@@ -5883,14 -5852,14 +5897,14 @@@ SIS 190 ETHERNET DRIVE M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sis190.c + F: drivers/net/ethernet/sis/sis190.c
SIS 900/7016 FAST ETHERNET DRIVER M: Daniele Venzano venza@brownhat.org W: http://www.brownhat.org/sis900.html L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sis900.* + F: drivers/net/ethernet/sis/sis900.*
SIS 96X I2C/SMBUS DRIVER M: "Mark M. Hoffman" mhoffman@lightlink.com @@@ -5917,8 -5886,7 +5931,7 @@@ SKGE, SKY2 10/100/1000 GIGABIT ETHERNE M: Stephen Hemminger shemminger@linux-foundation.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/skge.* - F: drivers/net/sky2.* + F: drivers/net/ethernet/marvell/sk*
SLAB ALLOCATOR M: Christoph Lameter cl@linux-foundation.org @@@ -5932,7 -5900,7 +5945,7 @@@ F: mm/sl?b. SMC91x ETHERNET DRIVER M: Nicolas Pitre nico@fluxnic.net S: Odd Fixes - F: drivers/net/smc91x.* + F: drivers/net/ethernet/smsc/smc91x.*
SMM665 HARDWARE MONITOR DRIVER M: Guenter Roeck linux@roeck-us.net @@@ -5967,13 -5935,13 +5980,13 @@@ M: Steve Glendinning <steve.glendinning L: netdev@vger.kernel.org S: Supported F: include/linux/smsc911x.h - F: drivers/net/smsc911x.* + F: drivers/net/ethernet/smsc/smsc911x.*
SMSC9420 PCI ETHERNET DRIVER M: Steve Glendinning steve.glendinning@smsc.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/smsc9420.* + F: drivers/net/ethernet/smsc/smsc9420.*
SN-IA64 (Itanium) SUB-PLATFORM M: Jes Sorensen jes@sgi.com @@@ -6007,7 -5975,7 +6020,7 @@@ SONIC NETWORK DRIVE M: Thomas Bogendoerfer tsbogend@alpha.franken.de L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sonic.* + F: drivers/net/ethernet/natsemi/sonic.*
SONICS SILICON BACKPLANE DRIVER (SSB) M: Michael Buesch m@bues.ch @@@ -6148,7 -6116,7 +6161,7 @@@ M: Jens Osterkamp <jens@de.ibm.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/spider_net.txt - F: drivers/net/spider_net* + F: drivers/net/ethernet/toshiba/spider_net*
SPU FILE SYSTEM M: Jeremy Kerr jk@ozlabs.org @@@ -6195,12 -6163,6 +6208,6 @@@ M: Jakub Schmidtke <sjakub@gmail.com S: Odd Fixes F: drivers/staging/asus_oled/
- STAGING - ATHEROS ATH6KL WIRELESS DRIVER - M: Luis R. Rodriguez mcgrof@gmail.com - M: Naveen Singh nsingh@atheros.com - S: Odd Fixes - F: drivers/staging/ath6kl/ - STAGING - COMEDI M: Ian Abbott abbotti@mev.co.uk M: Mori Hess fmhess@users.sourceforge.net @@@ -6326,7 -6288,7 +6333,7 @@@ F: drivers/staging/xgifb STARFIRE/DURALAN NETWORK DRIVER M: Ion Badulescu ionut@badula.org S: Odd Fixes - F: drivers/net/starfire* + F: drivers/net/ethernet/adaptec/starfire*
SUN3/3X M: Sam Creasey sammy@sammy.net @@@ -6335,6 -6297,7 +6342,7 @@@ S: Maintaine F: arch/m68k/kernel/*sun3* F: arch/m68k/sun3*/ F: arch/m68k/include/asm/sun3* + F: drivers/net/ethernet/i825xx/sun3*
SUPERH M: Paul Mundt lethal@linux-sh.org @@@ -6411,10 -6374,10 +6419,10 @@@ F: net/ipv4/tcp_lp.
TEGRA SUPPORT M: Colin Cross ccross@android.com -M: Erik Gilling konkers@android.com M: Olof Johansson olof@lixom.net +M: Stephen Warren swarren@nvidia.com L: linux-tegra@vger.kernel.org -T: git git://android.git.kernel.org/kernel/tegra.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git S: Supported F: arch/arm/mach-tegra
@@@ -6422,7 -6385,7 +6430,7 @@@ TEHUTI ETHERNET DRIVE M: Andy Gospodarek andy@greyhouse.net L: netdev@vger.kernel.org S: Supported - F: drivers/net/tehuti* + F: drivers/net/ethernet/tehuti/*
Telecom Clock Driver for MCPL0010 M: Mark Gross mark.gross@intel.com @@@ -6473,7 -6436,7 +6481,7 @@@ W: http://www.tilera.com/scm S: Supported F: arch/tile/ F: drivers/tty/hvc/hvc_tile.c - F: drivers/net/tile/ + F: drivers/net/ethernet/tile/ F: drivers/edac/tile_edac.c
TLAN NETWORK DRIVER @@@ -6482,7 -6445,7 +6490,7 @@@ L: tlan-devel@lists.sourceforge.net (su W: http://sourceforge.net/projects/tlan/ S: Maintained F: Documentation/networking/tlan.txt - F: drivers/net/tlan.* + F: drivers/net/ethernet/ti/tlan.*
TOMOYO SECURITY MODULE M: Kentaro Takeda takedakn@nttdata.co.jp @@@ -6576,7 -6539,7 +6584,7 @@@ TULIP NETWORK DRIVER M: Grant Grundler grundler@parisc-linux.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/tulip/ + F: drivers/net/ethernet/tulip/
TUN/TAP driver M: Maxim Krasnyansky maxk@qualcomm.com @@@ -6622,10 -6585,11 +6630,10 @@@ W: http://uclinux-h8.sourceforge.jp S: Supported F: arch/h8300/ F: drivers/ide/ide-h8300.c - F: drivers/net/ne-h8300.c + F: drivers/net/ethernet/8390/ne-h8300.c
UDF FILESYSTEM M: Jan Kara jack@suse.cz -W: http://linux-udf.sourceforge.net S: Maintained F: Documentation/filesystems/udf.txt F: fs/udf/ @@@ -7049,7 -7013,7 +7057,7 @@@ F: include/linux/vhost. VIA RHINE NETWORK DRIVER M: Roger Luethi rl@hellgate.ch S: Maintained - F: drivers/net/via-rhine.c + F: drivers/net/ethernet/via/via-rhine.c
VIAPRO SMBUS DRIVER M: Jean Delvare khali@linux-fr.org @@@ -7077,7 -7041,7 +7085,7 @@@ VIA VELOCITY NETWORK DRIVE M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/via-velocity.* + F: drivers/net/ethernet/via/via-velocity.*
VLAN (802.1Q) M: Patrick McHardy kaber@trash.net @@@ -7186,12 -7150,6 +7194,12 @@@ L: linux-scsi@vger.kernel.or S: Maintained F: drivers/scsi/wd7000.c
+WIIMOTE HID DRIVER +M: David Herrmann dh.herrmann@googlemail.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-wiimote* + WINBOND CIR DRIVER M: David Härdeman david@hardeman.nu S: Maintained diff --combined arch/mips/Kconfig index a926484,0dbb4ed..62b9677 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@@ -24,7 -24,6 +24,7 @@@ config MIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select HAVE_ARCH_JUMP_LABEL + select IRQ_FORCED_THREADING
menu "Machine selection"
@@@ -47,8 -46,6 +47,8 @@@ config MIPS_ALCHEM select GENERIC_GPIO select ARCH_WANT_OPTIONAL_GPIOLIB select SYS_SUPPORTS_ZBOOT + select USB_ARCH_HAS_OHCI + select USB_ARCH_HAS_EHCI
config AR7 bool "Texas Instruments AR7" @@@ -94,15 -91,8 +94,8 @@@ config BCM47X select DMA_NONCOHERENT select HW_HAS_PCI select IRQ_CPU - select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select SSB - select SSB_DRIVER_MIPS - select SSB_DRIVER_EXTIF - select SSB_EMBEDDED - select SSB_B43_PCI_BRIDGE if PCI - select SSB_PCICORE_HOSTMODE if PCI select GENERIC_GPIO select SYS_HAS_EARLY_PRINTK select CFE @@@ -215,7 -205,6 +208,7 @@@ config MACH_JZ474 select SYS_HAS_EARLY_PRINTK select HAVE_PWM select HAVE_CLK + select GENERIC_IRQ_CHIP
config LANTIQ bool "Lantiq based platforms" @@@ -726,7 -715,6 +719,7 @@@ config CAVIUM_OCTEON_SIMULATO select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_HOTPLUG_CPU select SYS_HAS_CPU_CAVIUM_OCTEON + select HOLES_IN_ZONE help The Octeon simulator is software performance model of the Cavium Octeon Processor. It supports simulating Octeon processors on x86 @@@ -749,7 -737,6 +742,7 @@@ config CAVIUM_OCTEON_REFERENCE_BOAR select ZONE_DMA32 select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + select HOLES_IN_ZONE help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@@ -794,6 -781,7 +787,7 @@@ endchoic
source "arch/mips/alchemy/Kconfig" source "arch/mips/ath79/Kconfig" + source "arch/mips/bcm47xx/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/jazz/Kconfig" source "arch/mips/jz4740/Kconfig" @@@ -979,9 -967,6 +973,9 @@@ config ISA_DMA_AP config GENERIC_GPIO bool
+config HOLES_IN_ZONE
+	bool
+
 #
 # Endianness selection.  Sufficiently obscure so many users don't know what to
 # answer, so we try hard to limit the available choices.  Also the use of a
@@@ -2101,7 -2086,7 +2095,7 @@@ config NODES_SHIF
config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && CPU_MIPS32 + depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON) default y help Enable hardware performance counter support for perf events. If diff --combined arch/powerpc/Kconfig index 8523bd1,47682b6..85195e4 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@@ -323,7 -323,7 +323,7 @@@ config SWIOTL
config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" - depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC) + depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC || PPC_POWERNV) ---help--- Say Y here to be able to disable and re-enable individual CPUs at runtime on SMP machines. @@@ -345,7 -345,7 +345,7 @@@ config ARCH_ENABLE_MEMORY_HOTREMOV
config KEXEC bool "kexec system call (EXPERIMENTAL)" - depends on (PPC_BOOK3S || FSL_BOOKE) && EXPERIMENTAL + depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@@ -429,7 -429,8 +429,7 @@@ config ARCH_POPULATES_NODE_MA def_bool y
config SYS_SUPPORTS_HUGETLBFS
-	def_bool y
-	depends on PPC_BOOK3S_64
+	bool
source "mm/Kconfig"
@@@ -655,6 -656,8 +655,8 @@@ config SBU
config FSL_SOC
 	bool
+	select HAVE_CAN_FLEXCAN if NET && CAN
+	select PPC_CLOCK if CAN_FLEXCAN
config FSL_PCI bool diff --combined arch/powerpc/configs/ppc40x_defconfig index e9d920c,7cb703b..1eb19ac --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@@ -14,6 -14,7 +14,6 @@@ CONFIG_MODULE_UNLOAD= CONFIG_PPC4xx_GPIO=y CONFIG_ACADIA=y CONFIG_EP405=y -CONFIG_HCU4=y CONFIG_HOTFOOT=y CONFIG_KILAUEA=y CONFIG_MAKALU=y @@@ -49,8 -50,9 +49,9 @@@ CONFIG_BLK_DEV_RAM= CONFIG_BLK_DEV_RAM_SIZE=35000 CONFIG_XILINX_SYSACE=m CONFIG_NETDEVICES=y - CONFIG_NET_ETHERNET=y - CONFIG_IBM_NEW_EMAC=y + CONFIG_ETHERNET=y + CONFIG_NET_VENDOR_IBM=y + CONFIG_IBM_EMAC=y # CONFIG_INPUT is not set CONFIG_SERIO=m # CONFIG_SERIO_I8042 is not set diff --combined arch/powerpc/platforms/40x/Kconfig index ae5e0bf,b5d8706..8f9c3e2 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@@ -32,6 -32,14 +32,6 @@@ config EP40 help This option enables support for the EP405/EP405PC boards.
-config HCU4
-	bool "Hcu4"
-	depends on 40x
-	default n
-	select 405GPR
-	help
-	  This option enables support for the Nestal Maschinen HCU4 board.
-
 config HOTFOOT
 	bool "Hotfoot"
 	depends on 40x
@@@ -122,21 -130,21 +122,21 @@@ config 405G
 	bool
 	select IBM405_ERR77
 	select IBM405_ERR51
- 	select IBM_NEW_EMAC_ZMII
+ 	select IBM_EMAC_ZMII
config 405EP bool
config 405EX bool - select IBM_NEW_EMAC_EMAC4 - select IBM_NEW_EMAC_RGMII + select IBM_EMAC_EMAC4 + select IBM_EMAC_RGMII
config 405EZ bool - select IBM_NEW_EMAC_NO_FLOW_CTRL - select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT - select IBM_NEW_EMAC_MAL_COMMON_ERR + select IBM_EMAC_NO_FLOW_CTRL + select IBM_EMAC_MAL_CLR_ICINTSTAT + select IBM_EMAC_MAL_COMMON_ERR
config 405GPR bool diff --combined arch/s390/include/asm/qdio.h index cbbf7d8,2199362..e63d13d --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@@ -46,8 -46,6 +46,8 @@@ struct qdesfmt0 u32 : 16; } __attribute__ ((packed));
+#define QDR_AC_MULTI_BUFFER_ENABLE 0x01
+
 /**
  * struct qdr - queue description record (QDR)
  * @qfmt: queue format
@@@ -125,6 -123,40 +125,40 @@@ struct slibe
 };
 /**
+ * struct qaob - queue asynchronous operation block
+ * @res0: reserved parameters
+ * @res1: reserved parameter
+ * @res2: reserved parameter
+ * @res3: reserved parameter
+ * @aorc: asynchronous operation return code
+ * @flags: internal flags
+ * @cbtbs: control block type
+ * @sb_count: number of storage blocks
+ * @sba: storage block element addresses
+ * @dcount: size of storage block elements
+ * @user0: user definable value
+ * @res4: reserved parameter
+ * @user1: user definable value
+ * @user2: user definable value
+ */
+ struct qaob {
+ 	u64 res0[6];
+ 	u8 res1;
+ 	u8 res2;
+ 	u8 res3;
+ 	u8 aorc;
+ 	u8 flags;
+ 	u16 cbtbs;
+ 	u8 sb_count;
+ 	u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ 	u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ 	u64 user0;
+ 	u64 res4[2];
+ 	u64 user1;
+ 	u64 user2;
+ } __attribute__ ((packed, aligned(256)));
+ 
+ /**
  * struct slib - storage list information block (SLIB)
  * @nsliba: next SLIB address (if any)
  * @sla: SL address
@@@ -224,11 -256,44 +258,46 @@@ struct slsb
 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(256)));
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080
+#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
 #define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
 #define CHSC_AC2_DATA_DIV_ENABLED 0x0002
+ /**
+  * struct qdio_outbuf_state - SBAL related asynchronous operation information
+  *   (for communication with upper layer programs)
+  *   (only required for use with completion queues)
+  * @flags: flags indicating state of buffer
+  * @aob: pointer to QAOB used for the particular SBAL
+  * @user: pointer to upper layer program's state information related to SBAL
+  *        (stored in user1 data of QAOB)
+  */
+ struct qdio_outbuf_state {
+ 	u8 flags;
+ 	struct qaob *aob;
+ 	void *user;
+ };
+ 
+ #define QDIO_OUTBUF_STATE_FLAG_NONE	0x00
+ #define QDIO_OUTBUF_STATE_FLAG_PENDING	0x01
+ 
+ #define CHSC_AC1_INITIATE_INPUTQ	0x80
+ 
+ 
+ /* qdio adapter-characteristics-1 flag */
+ #define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
+ #define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
+ #define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
+ #define AC1_AUTOMATIC_SYNC_ON_THININT	0x08	/* set by hypervisor */
+ #define AC1_AUTOMATIC_SYNC_ON_OUT_PCI	0x04	/* set by hypervisor */
+ #define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
+ #define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
+ 
+ #define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
+ #define CHSC_AC2_DATA_DIV_ENABLED	0x0002
+ 
+ #define CHSC_AC3_FORMAT2_CQ_AVAILABLE	0x8000
+ 
 struct qdio_ssqd_desc {
 	u8 flags;
 	u8:8;
@@@ -247,8 -312,7 +316,7 @@@
 	u64 sch_token;
 	u8 mro;
 	u8 mri;
- 	u8:8;
- 	u8 sbalic;
+ 	u16 qdioac3;
 	u16:16;
 	u8:8;
 	u8 mmwc;
@@@ -284,14 -348,15 +352,16 @@@ typedef void qdio_handler_t(struct ccw_
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
+ * @queue_start_poll: polling handlers (one per input queue or NULL)
  * @int_parm: interruption parameter
  * @input_sbal_addr_array: address of no_input_qs * 128 pointers
  * @output_sbal_addr_array: address of no_output_qs * 128 pointers
+ * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
  */
 struct qdio_initialize {
 	struct ccw_device *cdev;
 	unsigned char q_format;
+ 	unsigned char qdr_ac;
 	unsigned char adapter_name[8];
 	unsigned int qib_param_field_format;
 	unsigned char *qib_param_field;
@@@ -302,11 -367,12 +372,12 @@@
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
- 	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+ 	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
 	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
+ 	struct qdio_outbuf_state *output_sbal_state_array;
 };
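An aside on the interface above: qdr_ac, queue_start_poll, and output_sbal_state_array together form the new QDIO completion-queue plumbing. A driver that wants asynchronous completion notification supplies one struct qdio_outbuf_state per outbound SBAL and later finds QDIO_OUTBUF_STATE_FLAG_PENDING set (with the QAOB reachable through @aob) for deferred buffers. A minimal setup sketch follows, assuming <asm/qdio.h> and <linux/slab.h>; my_card, its members, and the my_* handlers are hypothetical illustration names, only the qdio_initialize fields and constants come from this header, and the qdio_allocate() step plus error handling are elided.

	static qdio_handler_t my_input_handler;   /* hypothetical driver handlers */
	static qdio_handler_t my_output_handler;

	static void my_queue_start_poll(struct ccw_device *cdev, int queue,
					unsigned long data)
	{
		/* kick the driver's own polling context for this input queue */
	}

	static int my_setup_qdio(struct my_card *card)   /* my_card is illustrative */
	{
		/* one polling handler per input queue, as the new field expects */
		static void (*poll_fns[1])(struct ccw_device *, int, unsigned long) = {
			my_queue_start_poll,
		};
		struct qdio_initialize init_data;

		/* one state slot per SBAL of the single output queue */
		card->out_states = kcalloc(QDIO_MAX_BUFFERS_PER_Q,
					   sizeof(*card->out_states), GFP_KERNEL);
		if (!card->out_states)
			return -ENOMEM;

		memset(&init_data, 0, sizeof(init_data));
		init_data.cdev = card->cdev;
		init_data.no_input_qs = 1;
		init_data.no_output_qs = 1;
		init_data.input_handler = my_input_handler;
		init_data.output_handler = my_output_handler;
		init_data.queue_start_poll = poll_fns;          /* new: handler array */
		init_data.qdr_ac = QDR_AC_MULTI_BUFFER_ENABLE;  /* new: multi-buffer */
		init_data.input_sbal_addr_array = card->in_sbals;
		init_data.output_sbal_addr_array = card->out_sbals;
		init_data.output_sbal_state_array = card->out_states; /* new: CQ state */

		return qdio_establish(&init_data);
	}

When a buffer completes asynchronously, the driver recovers its own context from the QAOB's user1 field, which is what the @user pointer in qdio_outbuf_state documents.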
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ @@@ -321,6 -387,7 +392,7 @@@ extern int qdio_allocate(struct qdio_initialize *); extern int qdio_establish(struct qdio_initialize *); extern int qdio_activate(struct ccw_device *); + extern void qdio_release_aob(struct qaob *); extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int, unsigned int); extern int qdio_start_irq(struct ccw_device *, int); diff --combined drivers/infiniband/hw/nes/nes_nic.c index 64f91d8,47b2ee4..c00d2f3 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@@ -441,13 -441,13 +441,13 @@@ static int nes_nic_send(struct sk_buff nesnic->tx_skb[nesnic->sq_head] = skb; for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags; skb_fragment_index++) { - bus_address = pci_map_page( nesdev->pcidev, - skb_shinfo(skb)->frags[skb_fragment_index].page, - skb_shinfo(skb)->frags[skb_fragment_index].page_offset, - skb_shinfo(skb)->frags[skb_fragment_index].size, - PCI_DMA_TODEVICE); + skb_frag_t *frag = + &skb_shinfo(skb)->frags[skb_fragment_index]; + bus_address = skb_frag_dma_map(&nesdev->pcidev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); wqe_fragment_length[wqe_fragment_index] = - cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size); + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index])); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), bus_address); wqe_fragment_index++; @@@ -561,11 -561,12 +561,12 @@@ tso_sq_no_longer_full /* Map all the buffers */ for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags; tso_frag_count++) { - tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev, - skb_shinfo(skb)->frags[tso_frag_count].page, - skb_shinfo(skb)->frags[tso_frag_count].page_offset, - skb_shinfo(skb)->frags[tso_frag_count].size, - PCI_DMA_TODEVICE); + skb_frag_t *frag = + &skb_shinfo(skb)->frags[tso_frag_count]; + tso_bus_address[tso_frag_count] = + skb_frag_dma_map(&nesdev->pcidev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); }
tso_frag_index = 0; @@@ -636,11 -637,11 +637,11 @@@ } while (wqe_fragment_index < 5) { wqe_fragment_length[wqe_fragment_index] = - cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size); + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index])); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), (u64)tso_bus_address[tso_frag_index]); wqe_fragment_index++; - tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size; + tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]); if (wqe_fragment_index < 5) wqe_fragment_length[wqe_fragment_index] = 0; if (tso_frag_index == tso_frag_count) @@@ -1090,8 -1091,6 +1091,8 @@@ static const char nes_ethtool_stringset "LRO aggregated", "LRO flushed", "LRO no_desc", + "PAU CreateQPs", + "PAU DestroyQPs", }; #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
@@@ -1307,8 -1306,6 +1308,8 @@@ static void nes_netdev_get_ethtool_stat target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; + target_stat_values[++index] = atomic_read(&pau_qps_created); + target_stat_values[++index] = atomic_read(&pau_qps_destroyed); }
/** @@@ -1642,7 -1639,7 +1643,7 @@@ static const struct net_device_ops nes_ .ndo_get_stats = nes_netdev_get_stats, .ndo_tx_timeout = nes_netdev_tx_timeout, .ndo_set_mac_address = nes_netdev_set_mac_address, - .ndo_set_multicast_list = nes_netdev_set_multicast_list, + .ndo_set_rx_mode = nes_netdev_set_multicast_list, .ndo_change_mtu = nes_netdev_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = nes_fix_features, diff --combined drivers/infiniband/ulp/ipoib/ipoib_cm.c index ef003cc,c74548a..57b4f9a --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@@ -169,7 -169,7 +169,7 @@@ static struct sk_buff *ipoib_cm_alloc_r goto partial_error; skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
- mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page, + mapping[i + 1] = ib_dma_map_page(priv->ca, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) goto partial_error; @@@ -537,12 -537,13 +537,13 @@@ static void skb_put_frags(struct sk_buf
if (length == 0) { /* don't need this page */ - skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE); + skb_fill_page_desc(toskb, i, skb_frag_page(frag), + 0, PAGE_SIZE); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size; + skb_frag_size_set(frag, size); skb->data_len += size; skb->truesize += size; skb->len += size; @@@ -1496,7 -1497,6 +1497,7 @@@ static void ipoib_cm_create_srq(struct { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_srq_init_attr srq_init_attr = { + .srq_type = IB_SRQT_BASIC, .attr = { .max_wr = ipoib_recvq_size, .max_sge = max_sge diff --combined drivers/net/bonding/bond_main.c index de3d351,41430ba..71efff3 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -557,7 -557,7 +557,7 @@@ down static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; + struct ethtool_cmd ecmd; u32 slave_speed; int res;
@@@ -565,18 -565,15 +565,15 @@@ slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL;
- if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) - return -1; - - res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); + res = __ethtool_get_settings(slave_dev, &ecmd); if (res < 0) return -1;
- slave_speed = ethtool_cmd_speed(&etool); + slave_speed = ethtool_cmd_speed(&ecmd); if (slave_speed == 0 || slave_speed == ((__u32) -1)) return -1;
- switch (etool.duplex) { + switch (ecmd.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; @@@ -585,7 -582,7 +582,7 @@@ }
slave->speed = slave_speed; - slave->duplex = etool.duplex; + slave->duplex = ecmd.duplex;
return 0; } @@@ -1435,8 -1432,6 +1432,8 @@@ static rx_handler_result_t bond_handle_ struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; + void (*recv_probe)(struct sk_buff *, struct bonding *, + struct slave *);
skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@@ -1450,12 -1445,11 +1447,12 @@@ if (bond->params.arp_interval) slave->dev->last_rx = jiffies;
- if (bond->recv_probe) { + recv_probe = ACCESS_ONCE(bond->recv_probe); + if (recv_probe) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) { - bond->recv_probe(nskb, bond, slave); + recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); } } @@@ -3710,44 -3704,27 +3707,27 @@@ static bool bond_addr_in_mc_list(unsign return false; }
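An aside on the recv_probe change in the hunk above: it is the standard lockless pattern for invoking a function pointer that another context may concurrently clear. The pointer is loaded exactly once through ACCESS_ONCE() into a local, so the NULL test and the call are guaranteed to see the same value; re-reading bond->recv_probe for the call could race with the pointer being reset during unenslavement. A condensed contrast, using the names from the diff (the comments are mine):

	void (*recv_probe)(struct sk_buff *, struct bonding *, struct slave *);

	/* Racy: the compiler may reload bond->recv_probe between the test
	 * and the call, and the field can become NULL in between. */
	if (bond->recv_probe)
		bond->recv_probe(nskb, bond, slave);

	/* Safe: snapshot once, then test and call the snapshot. */
	recv_probe = ACCESS_ONCE(bond->recv_probe);
	if (recv_probe)
		recv_probe(nskb, bond, slave);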
- static void bond_set_multicast_list(struct net_device *bond_dev) + static void bond_change_rx_flags(struct net_device *bond_dev, int change) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha; - bool found;
- /* - * Do promisc before checking multicast_mode - */ - if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC)) - /* - * FIXME: Need to handle the error when one of the multi-slaves - * encounters error. - */ - bond_set_promiscuity(bond, 1); - - - if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC)) - bond_set_promiscuity(bond, -1); - - - /* set allmulti flag to slaves */ - if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI)) - /* - * FIXME: Need to handle the error when one of the multi-slaves - * encounters error. - */ - bond_set_allmulti(bond, 1); + if (change & IFF_PROMISC) + bond_set_promiscuity(bond, + bond_dev->flags & IFF_PROMISC ? 1 : -1);
+ if (change & IFF_ALLMULTI) + bond_set_allmulti(bond, + bond_dev->flags & IFF_ALLMULTI ? 1 : -1); + }
- if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) - bond_set_allmulti(bond, -1); - + static void bond_set_multicast_list(struct net_device *bond_dev) + { + struct bonding *bond = netdev_priv(bond_dev); + struct netdev_hw_addr *ha; + bool found;
read_lock(&bond->lock);
- bond->flags = bond_dev->flags; - /* looking for addresses to add to slaves' mc list */ netdev_for_each_mc_addr(ha, bond_dev) { found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, @@@ -4306,7 -4283,8 +4286,8 @@@ static const struct net_device_ops bond .ndo_select_queue = bond_select_queue, .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, - .ndo_set_multicast_list = bond_set_multicast_list, + .ndo_change_rx_flags = bond_change_rx_flags, + .ndo_set_rx_mode = bond_set_multicast_list, .ndo_change_mtu = bond_change_mtu, .ndo_set_mac_address = bond_set_mac_address, .ndo_neigh_setup = bond_neigh_setup, @@@ -4852,11 -4830,20 +4833,20 @@@ static int bond_validate(struct nlattr return 0; }
+ static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], + unsigned int *num_queues, + unsigned int *real_num_queues) + { + *num_queues = tx_queues; + return 0; + } + static struct rtnl_link_ops bond_link_ops __read_mostly = { .kind = "bond", .priv_size = sizeof(struct bonding), .setup = bond_setup, .validate = bond_validate, + .get_tx_queues = bond_get_tx_queues, };
/* Create a new bond based on the specified name and bonding parameters. @@@ -4901,6 -4888,7 +4891,7 @@@ static int __net_init bond_net_init(str INIT_LIST_HEAD(&bn->dev_list);
bond_create_proc_dir(bn); + bond_create_sysfs(bn); return 0; } @@@ -4909,6 -4897,7 +4900,7 @@@ static void __net_exit bond_net_exit(st { struct bond_net *bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); }
@@@ -4946,10 -4935,6 +4938,6 @@@ static int __init bonding_init(void goto err; }
- res = bond_create_sysfs(); - if (res) - goto err; - register_netdevice_notifier(&bond_netdev_notifier); register_inetaddr_notifier(&bond_inetaddr_notifier); out: @@@ -4967,7 -4952,6 +4955,6 @@@ static void __exit bonding_exit(void unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_destroy_sysfs(); bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops); diff --combined drivers/net/can/mscan/mscan.c index 4cc6f44,ac42f5d..ec4a311 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@@ -62,7 -62,7 +62,7 @@@ static enum can_state state_map[] = static int mscan_set_mode(struct net_device *dev, u8 mode) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; int ret = 0; int i; u8 canctl1; @@@ -138,7 -138,7 +138,7 @@@ static int mscan_start(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u8 canrflg; int err;
@@@ -178,7 -178,7 +178,7 @@@ static int mscan_restart(struct net_dev struct mscan_priv *priv = netdev_priv(dev);
if (priv->type == MSCAN_TYPE_MPC5121) { - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
priv->can.state = CAN_STATE_ERROR_ACTIVE; WARN(!(in_8(®s->canmisc) & MSCAN_BOHOLD), @@@ -199,7 -199,7 +199,7 @@@ static netdev_tx_t mscan_start_xmit(str { struct can_frame *frame = (struct can_frame *)skb->data; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; int i, rtr, buf_id; u32 can_id;
@@@ -261,13 -261,11 +261,13 @@@ void __iomem *data = ®s->tx.dsr1_0; u16 *payload = (u16 *)frame->data;
- /* It is safe to write into dsr[dlc+1] */ - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* write remaining byte if necessary */ + if (frame->can_dlc & 1) + out_8(data, frame->data[frame->can_dlc - 1]); }
out_8(®s->tx.dlr, frame->can_dlc); @@@ -307,7 -305,7 +307,7 @@@ static enum can_state check_set_state(s static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u32 can_id; int i;
@@@ -332,13 -330,10 +332,13 @@@ void __iomem *data = ®s->rx.dsr1_0; u16 *payload = (u16 *)frame->data;
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* read remaining byte if necessary */ + if (frame->can_dlc & 1) + frame->data[frame->can_dlc - 1] = in_8(data); }
out_8(®s->canrflg, MSCAN_RXF); @@@ -348,7 -343,7 +348,7 @@@ static void mscan_get_err_frame(struct u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; enum can_state old_state;
@@@ -411,7 -406,7 +411,7 @@@ static int mscan_rx_poll(struct napi_st { struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); struct net_device *dev = napi->dev; - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; int npackets = 0; int ret = 1; @@@ -458,7 -453,7 +458,7 @@@ static irqreturn_t mscan_isr(int irq, v { struct net_device *dev = (struct net_device *)dev_id; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; u8 cantier, cantflg, canrflg; irqreturn_t ret = IRQ_NONE; @@@ -542,7 -537,7 +542,7 @@@ static int mscan_do_set_mode(struct net static int mscan_do_set_bittiming(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1;
@@@ -564,7 -559,7 +564,7 @@@ static int mscan_open(struct net_devic { int ret; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
/* common open */ ret = open_candev(dev); @@@ -603,7 -598,7 +603,7 @@@ exit_napi_disable static int mscan_close(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
netif_stop_queue(dev); napi_disable(&priv->napi); @@@ -627,7 -622,7 +627,7 @@@ static const struct net_device_ops msca int register_mscandev(struct net_device *dev, int mscan_clksrc) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u8 ctl1;
ctl1 = in_8(®s->canctl1); @@@ -664,7 -659,7 +664,7 @@@ void unregister_mscandev(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; mscan_set_mode(dev, MSCAN_INIT_MODE); clrbits8(®s->canctl1, MSCAN_CANE); unregister_candev(dev); diff --combined drivers/net/cris/eth_v10.c index 5c939ce,7cb2785..c7e1df9 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@@ -261,7 -261,7 +261,7 @@@ static const struct net_device_ops e100 .ndo_start_xmit = e100_send_packet, .ndo_tx_timeout = e100_tx_timeout, .ndo_get_stats = e100_get_stats, - .ndo_set_multicast_list = set_multicast_list, + .ndo_set_rx_mode = set_multicast_list, .ndo_do_ioctl = e100_ioctl, .ndo_set_mac_address = e100_set_mac_address, .ndo_validate_addr = eth_validate_addr, @@@ -1132,6 -1132,7 +1132,6 @@@ static irqreturn_ e100rxtx_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct net_local *np = netdev_priv(dev); unsigned long irqbits;
/* diff --combined drivers/net/ethernet/amd/au1000_eth.c index 0000000,8238667..4865ff1 mode 000000,100644..100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@@ -1,0 -1,1332 +1,1356 @@@ + /* + * + * Alchemy Au1x00 ethernet driver + * + * Copyright 2001-2003, 2006 MontaVista Software Inc. + * Copyright 2002 TimeSys Corp. + * Added ethtool/mii-tool support, + * Copyright 2004 Matt Porter mporter@kernel.crashing.org + * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de + * or riemer@riemer-nt.de: fixed the link beat detection with + * ioctls (SIOCGMIIPHY) + * Copyright 2006 Herbert Valerio Riedel hvr@gnu.org + * converted to use linux-2.6.x's PHY framework + * + * Author: MontaVista Software, Inc. + * ppopov@mvista.com or source@mvista.com + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * ######################################################################## + * + * + */ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/capability.h> + #include <linux/dma-mapping.h> + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/string.h> + #include <linux/timer.h> + #include <linux/errno.h> + #include <linux/in.h> + #include <linux/ioport.h> + #include <linux/bitops.h> + #include <linux/slab.h> + #include <linux/interrupt.h> + #include <linux/init.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/skbuff.h> + #include <linux/delay.h> + #include <linux/crc32.h> + #include <linux/phy.h> + #include <linux/platform_device.h> + #include <linux/cpu.h> + #include <linux/io.h> + + #include <asm/mipsregs.h> + #include <asm/irq.h> + #include <asm/processor.h> + + #include <au1000.h> + #include <au1xxx_eth.h> + #include <prom.h> + + #include "au1000_eth.h" + + #ifdef AU1000_ETH_DEBUG + static int au1000_debug = 5; + #else + static int au1000_debug = 3; + #endif + + #define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + + #define DRV_NAME "au1000_eth" + #define DRV_VERSION "1.7" + #define DRV_AUTHOR "Pete Popov ppopov@embeddedalley.com" + #define DRV_DESC "Au1xxx on-chip Ethernet driver" + + MODULE_AUTHOR(DRV_AUTHOR); + MODULE_DESCRIPTION(DRV_DESC); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + + /* + * Theory of operation + * + * The Au1000 MACs use a simple rx and tx descriptor ring scheme. + * There are four receive and four transmit descriptors. These + * descriptors are not in memory; rather, they are just a set of + * hardware registers. + * + * Since the Au1000 has a coherent data cache, the receive and + * transmit buffers are allocated from the KSEG0 segment. 
The + * hardware registers, however, are still mapped at KSEG1 to + * make sure there's no out-of-order writes, and that all writes + * complete immediately. + */ + + /* + * board-specific configurations + * + * PHY detection algorithm + * + * If phy_static_config is undefined, the PHY setup is + * autodetected: + * + * mii_probe() first searches the current MAC's MII bus for a PHY, + * selecting the first (or last, if phy_search_highest_addr is + * defined) PHY address not already claimed by another netdev. + * + * If nothing was found that way when searching for the 2nd ethernet + * controller's PHY and phy1_search_mac0 is defined, then + * the first MII bus is searched as well for an unclaimed PHY; this is + * needed in case of a dual-PHY accessible only through the MAC0's MII + * bus. + * + * Finally, if no PHY is found, then the corresponding ethernet + * controller is not registered to the network subsystem. + */ + + /* autodetection defaults: phy1_search_mac0 */ + + /* static PHY setup + * + * most boards PHY setup should be detectable properly with the + * autodetection algorithm in mii_probe(), but in some cases (e.g. if + * you have a switch attached, or want to use the PHY's interrupt + * notification capabilities) you can provide a static PHY + * configuration here + * + * IRQs may only be set, if a PHY address was configured + * If a PHY address is given, also a bus id is required to be set + * + * ps: make sure the used irqs are configured properly in the board + * specific irq-map + */ + + static void au1000_enable_mac(struct net_device *dev, int force_reset) + { + unsigned long flags; + struct au1000_private *aup = netdev_priv(dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (force_reset || (!aup->mac_enabled)) { + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 + | MAC_EN_CLOCK_ENABLE), aup->enable); + au_sync_delay(2); + + aup->mac_enabled = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + } + + /* + * MII operations + */ + static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) + { + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "read_MII busy timeout!!\n"); + return -1; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; + + writel(mii_control, mii_control_reg); + + timedout = 20; + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_read busy timeout!!\n"); + return -1; + } + } + return readl(mii_data_reg); + } + + static void au1000_mdio_write(struct net_device *dev, int phy_addr, + int reg, u16 value) + { + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_write busy timeout!!\n"); + return; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; + + writel(value, mii_data_reg); + writel(mii_control, mii_control_reg); + } + + static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) + { 
+ /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does + * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) + */ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return au1000_mdio_read(dev, phy_addr, regnum); + } + + static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) + { + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + au1000_mdio_write(dev, phy_addr, regnum, value); + return 0; + } + + static int au1000_mdiobus_reset(struct mii_bus *bus) + { + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return 0; + } + + static void au1000_hard_stop(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "hard stop\n"); + + reg = readl(&aup->mac->control); + reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); + } + + static void au1000_enable_rx_tx(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, hw, dev, "enable_rx_tx\n"); + + reg = readl(&aup->mac->control); + reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); + } + + static void + au1000_adjust_link(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct phy_device *phydev = aup->phy_dev; + unsigned long flags; + u32 reg; + + int status_change = 0; + + BUG_ON(!aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (phydev->link && (aup->old_speed != phydev->speed)) { + /* speed changed */ + + switch (phydev->speed) { + case SPEED_10: + case SPEED_100: + break; + default: + netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", + phydev->speed); + break; + } + + aup->old_speed = phydev->speed; + + status_change = 1; + } + + if (phydev->link && (aup->old_duplex != phydev->duplex)) { + /* duplex mode changed */ + + /* switching duplex mode requires to disable rx and tx! */ + au1000_hard_stop(dev); + + reg = readl(&aup->mac->control); + if (DUPLEX_FULL == phydev->duplex) { + reg |= MAC_FULL_DUPLEX; + reg &= ~MAC_DISABLE_RX_OWN; + } else { + reg &= ~MAC_FULL_DUPLEX; + reg |= MAC_DISABLE_RX_OWN; + } + writel(reg, &aup->mac->control); + au_sync_delay(1); + + au1000_enable_rx_tx(dev); + aup->old_duplex = phydev->duplex; + + status_change = 1; + } + + if (phydev->link != aup->old_link) { + /* link state changed */ + + if (!phydev->link) { + /* link went down */ + aup->old_speed = 0; + aup->old_duplex = -1; + } + + aup->old_link = phydev->link; + status_change = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + + if (status_change) { + if (phydev->link) + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); + else + netdev_info(dev, "link down\n"); + } + } + + static int au1000_mii_probe(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + if (aup->phy_static_config) { + BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); + + if (aup->phy_addr) + phydev = aup->mii_bus->phy_map[aup->phy_addr]; + else + netdev_info(dev, "using PHY-less setup\n"); + return 0; + } + + /* find the first (lowest address) PHY + * on the current MAC's MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) + if (aup->mii_bus->phy_map[phy_addr]) { + phydev = aup->mii_bus->phy_map[phy_addr]; + if (!aup->phy_search_highest_addr) + /* break out with first one found */ + break; + } + + if (aup->phy1_search_mac0) { + /* try harder to find a PHY */ + if (!phydev && (aup->mac_id == 1)) { + /* no PHY found, maybe we have a dual PHY? */ + dev_info(&dev->dev, ": no PHY found on MAC1, " + "let's see if it's attached to MAC0...\n"); + + /* find the first (lowest address) non-attached + * PHY on the MAC0 MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + struct phy_device *const tmp_phydev = + aup->mii_bus->phy_map[phy_addr]; + + if (aup->mac_id == 1) + break; + + /* no PHY here... */ + if (!tmp_phydev) + continue; + + /* already claimed by MAC0 */ + if (tmp_phydev->attached_dev) + continue; + + phydev = tmp_phydev; + break; /* found it */ + } + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -1; + } + + /* now we are supposed to have a proper phydev, to attach to... */ + BUG_ON(phydev->attached_dev); + + phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, + 0, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + aup->old_link = 0; + aup->old_speed = 0; + aup->old_duplex = -1; + aup->phy_dev = phydev; + + netdev_info(dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; + } + + + /* + * Buffer allocation/deallocation routines. The buffer descriptor returned + * has the virtual and dma address of a buffer suitable for + * both, receive and transmit operations. 
+ */ + static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) + { + struct db_dest *pDB; + pDB = aup->pDBfree; + + if (pDB) + aup->pDBfree = pDB->pnext; + + return pDB; + } + + void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) + { + struct db_dest *pDBfree = aup->pDBfree; + if (pDBfree) + pDBfree->pnext = pDB; + aup->pDBfree = pDB; + } + + static void au1000_reset_mac_unlocked(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + int i; + + au1000_hard_stop(dev); + + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel(0, aup->enable); + au_sync_delay(2); + + aup->tx_full = 0; + for (i = 0; i < NUM_RX_DMA; i++) { + /* reset control bits */ + aup->rx_dma_ring[i]->buff_stat &= ~0xf; + } + for (i = 0; i < NUM_TX_DMA; i++) { + /* reset control bits */ + aup->tx_dma_ring[i]->buff_stat &= ~0xf; + } + + aup->mac_enabled = 0; + + } + + static void au1000_reset_mac(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + unsigned long flags; + + netif_dbg(aup, hw, dev, "reset mac, aup %x\n", + (unsigned)aup); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + spin_unlock_irqrestore(&aup->lock, flags); + } + + /* + * Setup the receive and transmit "rings". These pointers are the addresses + * of the rx and tx MAC DMA registers so they are fixed by the hardware -- + * these are not descriptors sitting in memory. + */ + static void -au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) ++au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) + { + int i; + + for (i = 0; i < NUM_RX_DMA; i++) { - aup->rx_dma_ring[i] = - (struct rx_dma *) - (rx_base + sizeof(struct rx_dma)*i); ++ aup->rx_dma_ring[i] = (struct rx_dma *) ++ (tx_base + 0x100 + sizeof(struct rx_dma) * i); + } + for (i = 0; i < NUM_TX_DMA; i++) { - aup->tx_dma_ring[i] = - (struct tx_dma *) - (tx_base + sizeof(struct tx_dma)*i); ++ aup->tx_dma_ring[i] = (struct tx_dma *) ++ (tx_base + sizeof(struct tx_dma) * i); + } + } + + /* + * ethtool operations + */ + + static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (aup->phy_dev) + return phy_ethtool_gset(aup->phy_dev, cmd); + + return -EINVAL; + } + + static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (aup->phy_dev) + return phy_ethtool_sset(aup->phy_dev, cmd); + + return -EINVAL; + } + + static void + au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + struct au1000_private *aup = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id); + info->regdump_len = 0; + } + + static void au1000_set_msglevel(struct net_device *dev, u32 value) + { + struct au1000_private *aup = netdev_priv(dev); + aup->msg_enable = value; + } + + static u32 au1000_get_msglevel(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + return aup->msg_enable; + } + + static const struct ethtool_ops au1000_ethtool_ops = { + .get_settings = au1000_get_settings, + .set_settings = au1000_set_settings, + .get_drvinfo = au1000_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = au1000_get_msglevel, + .set_msglevel = au1000_set_msglevel, + }; + + + 
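A sketch of the new ring addressing, editorial and not from the patch: au1000_setup_hw_rings() above now derives both descriptor "rings" from the single ioremapped MACDMA window, with tx descriptors at offset 0 and rx descriptors at offset 0x100, replacing the old per-MAC MACn_RX_DMA_ADDR/MACn_TX_DMA_ADDR constants. The helpers below are hypothetical, named here only to make the offsets concrete:

/* where slot i of each ring lives inside the mapped MACDMA window */
static struct tx_dma *tx_slot(void __iomem *macdma, int i)
{
	return (struct tx_dma *)(macdma + i * sizeof(struct tx_dma));
}

static struct rx_dma *rx_slot(void __iomem *macdma, int i)
{
	return (struct rx_dma *)(macdma + 0x100 + i * sizeof(struct rx_dma));
}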
/* + * Initialize the interface. + * + * When the device powers up, the clocks are disabled and the + * mac is in reset state. When the interface is closed, we + * do the same -- reset the device and disable the clocks to + * conserve power. Thus, whenever au1000_init() is called, + * the device should already be in reset state. + */ + static int au1000_init(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + unsigned long flags; + int i; + u32 control; + + netif_dbg(aup, hw, dev, "au1000_init\n"); + + /* bring the device out of reset */ + au1000_enable_mac(dev, 1); + + spin_lock_irqsave(&aup->lock, flags); + + writel(0, &aup->mac->control); + aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; + aup->tx_tail = aup->tx_head; + aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; + + writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], + &aup->mac->mac_addr_high); + writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | + dev->dev_addr[1]<<8 | dev->dev_addr[0], + &aup->mac->mac_addr_low); + + + for (i = 0; i < NUM_RX_DMA; i++) + aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; + + au_sync(); + + control = MAC_RX_ENABLE | MAC_TX_ENABLE; + #ifndef CONFIG_CPU_LITTLE_ENDIAN + control |= MAC_BIG_ENDIAN; + #endif + if (aup->phy_dev) { + if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) + control |= MAC_FULL_DUPLEX; + else + control |= MAC_DISABLE_RX_OWN; + } else { /* PHY-less op, assume full-duplex */ + control |= MAC_FULL_DUPLEX; + } + + writel(control, &aup->mac->control); + writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ + au_sync(); + + spin_unlock_irqrestore(&aup->lock, flags); + return 0; + } + + static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) + { + struct net_device_stats *ps = &dev->stats; + + ps->rx_packets++; + if (status & RX_MCAST_FRAME) + ps->multicast++; + + if (status & RX_ERROR) { + ps->rx_errors++; + if (status & RX_MISSED_FRAME) + ps->rx_missed_errors++; + if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) + ps->rx_length_errors++; + if (status & RX_CRC_ERROR) + ps->rx_crc_errors++; + if (status & RX_COLL) + ps->collisions++; + } else + ps->rx_bytes += status & RX_FRAME_LEN_MASK; + + } + + /* + * Au1000 receive routine. 
+ */ + static int au1000_rx(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct sk_buff *skb; + struct rx_dma *prxd; + u32 buff_stat, status; + struct db_dest *pDB; + u32 frmlen; + + netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); + + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + while (buff_stat & RX_T_DONE) { + status = prxd->status; + pDB = aup->rx_db_inuse[aup->rx_head]; + au1000_update_rx_stats(dev, status); + if (!(status & RX_ERROR)) { + + /* good frame */ + frmlen = (status & RX_FRAME_LEN_MASK); + frmlen -= 4; /* Remove FCS */ + skb = dev_alloc_skb(frmlen + 2); + if (skb == NULL) { + netdev_err(dev, "Memory squeeze, dropping packet.\n"); + dev->stats.rx_dropped++; + continue; + } + skb_reserve(skb, 2); /* 16 byte IP header align */ + skb_copy_to_linear_data(skb, + (unsigned char *)pDB->vaddr, frmlen); + skb_put(skb, frmlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); /* pass the packet to upper layers */ + } else { + if (au1000_debug > 4) { + pr_err("rx_error(s):"); + if (status & RX_MISSED_FRAME) + pr_cont(" miss"); + if (status & RX_WDOG_TIMER) + pr_cont(" wdog"); + if (status & RX_RUNT) + pr_cont(" runt"); + if (status & RX_OVERLEN) + pr_cont(" overlen"); + if (status & RX_COLL) + pr_cont(" coll"); + if (status & RX_MII_ERROR) + pr_cont(" mii error"); + if (status & RX_CRC_ERROR) + pr_cont(" crc error"); + if (status & RX_LEN_ERROR) + pr_cont(" len error"); + if (status & RX_U_CNTRL_FRAME) + pr_cont(" u control frame"); + pr_cont("\n"); + } + } + prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); + au_sync(); + + /* next descriptor */ + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + } + return 0; + } + + static void au1000_update_tx_stats(struct net_device *dev, u32 status) + { + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + + if (status & TX_FRAME_ABORTED) { + if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { + /* any other tx errors are only valid + * in half duplex mode + */ + ps->tx_errors++; + ps->tx_aborted_errors++; + } + } else { + ps->tx_errors++; + ps->tx_aborted_errors++; + if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) + ps->tx_carrier_errors++; + } + } + } + + /* + * Called from the interrupt service routine to acknowledge + * the TX DONE bits. This is a must if the irq is setup as + * edge triggered. + */ + static void au1000_tx_ack(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct tx_dma *ptxd; + + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + while (ptxd->buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->buff_stat &= ~TX_T_DONE; + ptxd->len = 0; + au_sync(); + + aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + } + } + + /* + * Au1000 interrupt service routine. 
+ */ + static irqreturn_t au1000_interrupt(int irq, void *dev_id) + { + struct net_device *dev = dev_id; + + /* Handle RX interrupts first to minimize chance of overrun */ + + au1000_rx(dev); + au1000_tx_ack(dev); + return IRQ_RETVAL(1); + } + + static int au1000_open(struct net_device *dev) + { + int retval; + struct au1000_private *aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); + + retval = request_irq(dev->irq, au1000_interrupt, 0, + dev->name, dev); + if (retval) { + netdev_err(dev, "unable to get IRQ %d\n", dev->irq); + return retval; + } + + retval = au1000_init(dev); + if (retval) { + netdev_err(dev, "error in au1000_init\n"); + free_irq(dev->irq, dev); + return retval; + } + + if (aup->phy_dev) { + /* cause the PHY state machine to schedule a link state check */ + aup->phy_dev->state = PHY_CHANGELINK; + phy_start(aup->phy_dev); + } + + netif_start_queue(dev); + + netif_dbg(aup, drv, dev, "open: Initialization done.\n"); + + return 0; + } + + static int au1000_close(struct net_device *dev) + { + unsigned long flags; + struct au1000_private *const aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); + + if (aup->phy_dev) + phy_stop(aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + /* stop the device */ + netif_stop_queue(dev); + + /* disable the interrupt */ + free_irq(dev->irq, dev); + spin_unlock_irqrestore(&aup->lock, flags); + + return 0; + } + + /* + * Au1000 transmit routine. + */ + static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + struct tx_dma *ptxd; + u32 buff_stat; + struct db_dest *pDB; + int i; + + netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", + (unsigned)aup, skb->len, + skb->data, aup->tx_head); + + ptxd = aup->tx_dma_ring[aup->tx_head]; + buff_stat = ptxd->buff_stat; + if (buff_stat & TX_DMA_ENABLE) { + /* We've wrapped around and the transmitter is still busy */ + netif_stop_queue(dev); + aup->tx_full = 1; + return NETDEV_TX_BUSY; + } else if (buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->len = 0; + } + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + + pDB = aup->tx_db_inuse[aup->tx_head]; + skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); + if (skb->len < ETH_ZLEN) { + for (i = skb->len; i < ETH_ZLEN; i++) + ((char *)pDB->vaddr)[i] = 0; + + ptxd->len = ETH_ZLEN; + } else + ptxd->len = skb->len; + + ps->tx_packets++; + ps->tx_bytes += ptxd->len; + + ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + au_sync(); + dev_kfree_skb(skb); + aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); + return NETDEV_TX_OK; + } + + /* + * The Tx ring has been full longer than the watchdog timeout + * value. The transmitter must be hung? + */ + static void au1000_tx_timeout(struct net_device *dev) + { + netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); + au1000_reset_mac(dev); + au1000_init(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + } + + static void au1000_multicast_list(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); + reg = readl(&aup->mac->control); + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ + reg |= MAC_PROMISCUOUS; + } else if ((dev->flags & IFF_ALLMULTI) || + netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { + reg |= MAC_PASS_ALL_MULTI; + reg &= ~MAC_PROMISCUOUS; + netdev_info(dev, "Pass all multicast\n"); + } else { + struct netdev_hw_addr *ha; + u32 mc_filter[2]; /* Multicast hash filter */ + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, + (long *)mc_filter); + writel(mc_filter[1], &aup->mac->multi_hash_high); + writel(mc_filter[0], &aup->mac->multi_hash_low); + reg &= ~MAC_PROMISCUOUS; + reg |= MAC_HASH_MODE; + } + writel(reg, &aup->mac->control); + } + + static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!aup->phy_dev) + return -EINVAL; /* PHY not controllable */ + + return phy_mii_ioctl(aup->phy_dev, rq, cmd); + } + + static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_rx_mode = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + }; + + static int __devinit au1000_probe(struct platform_device *pdev) + { + static unsigned version_printed; + struct au1000_private *aup = NULL; + struct au1000_eth_platform_data *pd; + struct net_device *dev = NULL; + struct db_dest *pDB, *pDBfree; + int irq, i, err = 0; - struct resource *base, *macen; ++ struct resource *base, *macen, *macdma; + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!base) { + dev_err(&pdev->dev, "failed to retrieve base register\n"); + err = -ENODEV; + goto out; + } + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!macen) { + dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); + err = -ENODEV; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to retrieve IRQ\n"); + err = -ENODEV; + goto out; + } + ++ macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (!macdma) { ++ dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n"); ++ err = -ENODEV; ++ goto out; ++ } ++ + if (!request_mem_region(base->start, resource_size(base), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for base registers\n"); + err = -ENXIO; + goto out; + } + + if (!request_mem_region(macen->start, resource_size(macen), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); + err = -ENXIO; + goto err_request; + } + ++ if (!request_mem_region(macdma->start, resource_size(macdma), ++ pdev->name)) { ++ dev_err(&pdev->dev, "failed to request MACDMA memory region\n"); ++ err = -ENXIO; ++ goto err_macdma; ++ } ++ + dev = alloc_etherdev(sizeof(struct au1000_private)); + if (!dev) { + dev_err(&pdev->dev, "alloc_etherdev failed\n"); + err = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + aup = netdev_priv(dev); + + spin_lock_init(&aup->lock); + aup->msg_enable = (au1000_debug < 4 ? 
+ AU1000_DEF_MSG_ENABLE : au1000_debug); + + /* Allocate the data buffers + * Snooping works fine with eth on all au1xxx + */ + aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); + if (!aup->vaddr) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto err_vaddr; + } + + /* aup->mac is the base address of the MAC's registers */ + aup->mac = (struct mac_reg *) + ioremap_nocache(base->start, resource_size(base)); + if (!aup->mac) { + dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); + err = -ENXIO; + goto err_remap1; + } + + /* Setup some variables for quick register address access */ + aup->enable = (u32 *)ioremap_nocache(macen->start, + resource_size(macen)); + if (!aup->enable) { + dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); + err = -ENXIO; + goto err_remap2; + } + aup->mac_id = pdev->id; + - if (pdev->id == 0) - au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); - else if (pdev->id == 1) - au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); ++ aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); ++ if (!aup->macdma) { ++ dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); ++ err = -ENXIO; ++ goto err_remap3; ++ } ++ ++ au1000_setup_hw_rings(aup, aup->macdma); + + /* set a random MAC now in case platform_data doesn't provide one */ + random_ether_addr(dev->dev_addr); + + writel(0, aup->enable); + aup->mac_enabled = 0; + + pd = pdev->dev.platform_data; + if (!pd) { + dev_info(&pdev->dev, "no platform_data passed," + " PHY search on MAC0\n"); + aup->phy1_search_mac0 = 1; + } else { + if (is_valid_ether_addr(pd->mac)) + memcpy(dev->dev_addr, pd->mac, 6); + + aup->phy_static_config = pd->phy_static_config; + aup->phy_search_highest_addr = pd->phy_search_highest_addr; + aup->phy1_search_mac0 = pd->phy1_search_mac0; + aup->phy_addr = pd->phy_addr; + aup->phy_busid = pd->phy_busid; + aup->phy_irq = pd->phy_irq; + } + + if (aup->phy_busid && aup->phy_busid > 0) { + dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); + err = -ENODEV; + goto err_mdiobus_alloc; + } + + aup->mii_bus = mdiobus_alloc(); + if (aup->mii_bus == NULL) { + dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); + err = -ENOMEM; + goto err_mdiobus_alloc; + } + + aup->mii_bus->priv = dev; + aup->mii_bus->read = au1000_mdiobus_read; + aup->mii_bus->write = au1000_mdiobus_write; + aup->mii_bus->reset = au1000_mdiobus_reset; + aup->mii_bus->name = "au1000_eth_mii"; + snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); + aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (aup->mii_bus->irq == NULL) + goto err_out; + + for (i = 0; i < PHY_MAX_ADDR; ++i) + aup->mii_bus->irq[i] = PHY_POLL; + /* if known, set corresponding PHY IRQs */ + if (aup->phy_static_config) + if (aup->phy_irq && aup->phy_busid == aup->mac_id) + aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; + + err = mdiobus_register(aup->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MDIO bus\n"); + goto err_mdiobus_reg; + } + + if (au1000_mii_probe(dev) != 0) + goto err_out; + + pDBfree = NULL; + /* setup the data buffer descriptors and attach a buffer to each one */ + pDB = aup->db; + for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { + pDB->pnext = pDBfree; + pDBfree = pDB; + pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); + pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB++; + } + 
aup->pDBfree = pDBfree; + + for (i = 0; i < NUM_RX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_db_inuse[i] = pDB; + } + for (i = 0; i < NUM_TX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->len = 0; + aup->tx_db_inuse[i] = pDB; + } + + dev->base_addr = base->start; + dev->irq = irq; + dev->netdev_ops = &au1000_netdev_ops; + SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); + dev->watchdog_timeo = ETH_TX_TIMEOUT; + + /* + * The boot code uses the ethernet controller, so reset it to start + * fresh. au1000_init() expects that the device is in reset state. + */ + au1000_reset_mac(dev); + + err = register_netdev(dev); + if (err) { + netdev_err(dev, "Cannot register net device, aborting.\n"); + goto err_out; + } + + netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", + (unsigned long)base->start, irq); + if (version_printed++ == 0) + pr_info("%s version %s %s\n", + DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + return 0; + + err_out: + if (aup->mii_bus != NULL) + mdiobus_unregister(aup->mii_bus); + + /* here we should have a valid dev plus aup-> register addresses + * so we can reset the mac properly. + */ + au1000_reset_mac(dev); + + for (i = 0; i < NUM_RX_DMA; i++) { + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + } + for (i = 0; i < NUM_TX_DMA; i++) { + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + } + err_mdiobus_reg: + mdiobus_free(aup->mii_bus); + err_mdiobus_alloc: ++ iounmap(aup->macdma); ++err_remap3: + iounmap(aup->enable); + err_remap2: + iounmap(aup->mac); + err_remap1: + dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + err_vaddr: + free_netdev(dev); + err_alloc: ++ release_mem_region(macdma->start, resource_size(macdma)); ++err_macdma: + release_mem_region(macen->start, resource_size(macen)); + err_request: + release_mem_region(base->start, resource_size(base)); + out: + return err; + } + + static int __devexit au1000_remove(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); + struct au1000_private *aup = netdev_priv(dev); + int i; + struct resource *base, *macen; + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(dev); + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + + for (i = 0; i < NUM_RX_DMA; i++) + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + + for (i = 0; i < NUM_TX_DMA; i++) + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + + dma_free_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + ++ iounmap(aup->macdma); + iounmap(aup->mac); + iounmap(aup->enable); + ++ base = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ release_mem_region(base->start, resource_size(base)); ++ + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(base->start, resource_size(base)); + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(macen->start, resource_size(macen)); + + free_netdev(dev); + + return 0; + } + + static struct platform_driver au1000_eth_driver = { + .probe = au1000_probe, + .remove = __devexit_p(au1000_remove), + .driver = { + .name = "au1000-eth", + .owner = THIS_MODULE, + }, + }; + MODULE_ALIAS("platform:au1000-eth"); + + + static int __init 
au1000_init_module(void) + { + return platform_driver_register(&au1000_eth_driver); + } + + static void __exit au1000_exit_module(void) + { + platform_driver_unregister(&au1000_eth_driver); + } + + module_init(au1000_init_module); + module_exit(au1000_exit_module); diff --combined drivers/net/ethernet/amd/au1000_eth.h index 4b7f7ad,6229c77..4b7f7ad --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@@ -124,7 -124,7 +124,7 @@@ struct au1000_private */ struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ - + void __iomem *macdma; /* base of MAC DMA port */ u32 vaddr; /* virtual address of rx/tx buffers */ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
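The new macdma field above is what ties the au1000_eth.c changes together: probe maps the third platform MEM resource once, and both the ring setup and the teardown paths go through the cached pointer. A minimal sketch of that lifecycle, editorial and not from the patch, with error handling elided:

/* probe: map resource 2 and hand it to the ring setup */
macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
au1000_setup_hw_rings(aup, aup->macdma);

/* remove and error paths: unmap and release symmetrically */
iounmap(aup->macdma);
release_mem_region(macdma->start, resource_size(macdma));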
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0000000,2f92487..627a580 mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@@ -1,0 -1,2069 +1,2075 @@@ + /* bnx2x.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein eilong@broadcom.com + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + + #ifndef BNX2X_H + #define BNX2X_H + #include <linux/netdevice.h> + #include <linux/dma-mapping.h> + #include <linux/types.h> + + /* compilation time flags */ + + /* define this to make the driver freeze on error to allow getting debug info + * (you will need to reboot afterwards) */ + /* #define BNX2X_STOP_ON_ERROR */ + + #define DRV_MODULE_VERSION "1.70.00-0" + #define DRV_MODULE_RELDATE "2011/06/13" + #define BNX2X_BC_VER 0x040200 + + #if defined(CONFIG_DCB) + #define BCM_DCBNL + #endif + #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) + #define BCM_CNIC 1 + #include "../cnic_if.h" + #endif + + #ifdef BCM_CNIC + #define BNX2X_MIN_MSIX_VEC_CNT 3 + #define BNX2X_MSIX_VEC_FP_START 2 + #else + #define BNX2X_MIN_MSIX_VEC_CNT 2 + #define BNX2X_MSIX_VEC_FP_START 1 + #endif + + #include <linux/mdio.h> + + #include "bnx2x_reg.h" + #include "bnx2x_fw_defs.h" + #include "bnx2x_hsi.h" + #include "bnx2x_link.h" + #include "bnx2x_sp.h" + #include "bnx2x_dcb.h" + #include "bnx2x_stats.h" + + /* error/debug prints */ + + #define DRV_MODULE_NAME "bnx2x" + + /* for messages that are currently off */ + #define BNX2X_MSG_OFF 0 + #define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ + #define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ + #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ + + /* regular debug print */ + #define DP(__mask, fmt, ...) \ + do { \ + if (bp->msg_enable & (__mask)) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + #define DP_CONT(__mask, fmt, ...) \ + do { \ + if (bp->msg_enable & (__mask)) \ + pr_cont(fmt, ##__VA_ARGS__); \ + } while (0) + + /* errors debug print */ + #define BNX2X_DBG_ERR(fmt, ...) \ + do { \ + if (netif_msg_probe(bp)) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + /* for errors (never masked) */ + #define BNX2X_ERR(fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + #define BNX2X_ERROR(fmt, ...) \ + pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) + + + /* before we have a dev->name use dev_info() */ + #define BNX2X_DEV_INFO(fmt, ...) 
\ + do { \ + if (netif_msg_probe(bp)) \ + dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ + } while (0) + + #ifdef BNX2X_STOP_ON_ERROR + void bnx2x_int_disable(struct bnx2x *bp); + #define bnx2x_panic() \ + do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_int_disable(bp); \ + bnx2x_panic_dump(bp); \ + } while (0) + #else + #define bnx2x_panic() \ + do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp); \ + } while (0) + #endif + + #define bnx2x_mc_addr(ha) ((ha)->addr) + #define bnx2x_uc_addr(ha) ((ha)->addr) + + #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) + #define U64_HI(x) (u32)(((u64)(x)) >> 32) + #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + + + #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) + + #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) + #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) + #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) + + #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) + #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) + #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) + + #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) + #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) + + #define REG_RD_DMAE(bp, offset, valp, len32) \ + do { \ + bnx2x_read_dmae(bp, offset, len32);\ + memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ + } while (0) + + #define REG_WR_DMAE(bp, offset, valp, len32) \ + do { \ + memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ + offset, len32); \ + } while (0) + + #define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ + REG_WR_DMAE(bp, offset, valp, len32) + + #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ + do { \ + memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ + bnx2x_write_big_buf_wb(bp, addr, len32); \ + } while (0) + + #define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ + offsetof(struct shmem_region, field)) + #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) + #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) + + #define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ + offsetof(struct shmem2_region, field)) + #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) + #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) + #define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ + offsetof(struct mf_cfg, field)) + #define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ + offsetof(struct mf2_cfg, field)) + + #define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) + #define MF_CFG_WR(bp, field, val) REG_WR(bp,\ + MF_CFG_ADDR(bp, field), (val)) + #define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) + + #define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ + (SHMEM2_RD((bp), size) > \ + offsetof(struct shmem2_region, field))) + + #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) + #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) + + /* SP SB indices */ + + /* General SP events - stats query, cfc delete, etc */ + #define HC_SP_INDEX_ETH_DEF_CONS 3 + + /* EQ completions */ + #define HC_SP_INDEX_EQ_CONS 7 + + /* FCoE L2 connection completions */ + #define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 + #define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 + /* iSCSI L2 */ + #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 + #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + + /* Special clients 
parameters */ + + /* SB indices */ + /* FCoE L2 */ + #define BNX2X_FCOE_L2_RX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) + + #define BNX2X_FCOE_L2_TX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) + + /** + * CIDs and CLIDs: + * CLIDs below is a CLID for func 0, then the CLID for other + * functions will be calculated by the formula: + * + * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X + * + */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 -#define BNX2X_ISCSI_ETH_CID 49 - -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX 2 -#define BNX2X_FCOE_ETH_CID 50 ++enum { ++ BNX2X_ISCSI_ETH_CL_ID_IDX, ++ BNX2X_FCOE_ETH_CL_ID_IDX, ++ BNX2X_MAX_CNIC_ETH_CL_ID_IDX, ++}; ++ ++#define BNX2X_CNIC_START_ETH_CID 48 ++enum { ++ /* iSCSI L2 */ ++ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, ++ /* FCoE L2 */ ++ BNX2X_FCOE_ETH_CID, ++}; + + /** Additional rings budgeting */ + #ifdef BCM_CNIC + #define CNIC_PRESENT 1 + #define FCOE_PRESENT 1 + #else + #define CNIC_PRESENT 0 + #define FCOE_PRESENT 0 + #endif /* BCM_CNIC */ + #define NON_ETH_CONTEXT_USE (FCOE_PRESENT) + + #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + + #define SM_RX_ID 0 + #define SM_TX_ID 1 + + /* defines for multiple tx priority indices */ + #define FIRST_TX_ONLY_COS_INDEX 1 + #define FIRST_TX_COS_INDEX 0 + + /* defines for decodeing the fastpath index and the cos index out of the + * transmission queue index + */ + #define MAX_TXQS_PER_COS FP_SB_MAX_E1x + + #define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) + #define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + + /* rules for calculating the cids of tx-only connections */ + #define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) + #define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) + + /* fp index inside class of service range */ + #define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + + /* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 If applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) + */ + #define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) + #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) + + /* fast path */ + struct sw_rx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + }; + + struct sw_tx_bd { + struct sk_buff *skb; + u16 first_bd; + u8 flags; + /* Set on the first BD descriptor when there is a split BD */ + #define BNX2X_TSO_SPLIT_BD (1<<0) + }; + + struct sw_rx_page { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); + }; + + union db_prod { + struct doorbell_set_prod data; + u32 raw; + }; + + /* dropless fc FW/HW related params */ + #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) + #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? 
\ + ETH_MAX_AGGREGATION_QUEUES_E1 :\ + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) + #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) + #define FW_PREFETCH_CNT 16 + #define DROPLESS_FC_HEADROOM 100 + + /* MC hsi */ + #define BCM_PAGE_SHIFT 12 + #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) + #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) + #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) + + #define PAGES_PER_SGE_SHIFT 0 + #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) + #define SGE_PAGE_SIZE PAGE_SIZE + #define SGE_PAGE_SHIFT PAGE_SHIFT + #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) + + /* SGE ring related macros */ + #define NUM_RX_SGE_PAGES 2 + #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) + #define NEXT_PAGE_SGE_DESC_CNT 2 + #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) + /* RX_SGE_CNT is promised to be a power of 2 */ + #define RX_SGE_MASK (RX_SGE_CNT - 1) + #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) + #define MAX_RX_SGE (NUM_RX_SGE - 1) + #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ + (MAX_RX_SGE_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ + (x) + 1) + #define RX_SGE(x) ((x) & MAX_RX_SGE) + + /* + * Number of required SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + * these aggregations will probably consume SGE immidiatelly) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + * after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ + #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) + #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ + MAX_RX_SGE_CNT) + #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) + #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + /* Manipulate a bit vector defined as an array of u64 */ + + /* Number of bits in one sge_mask array element */ + #define BIT_VEC64_ELEM_SZ 64 + #define BIT_VEC64_ELEM_SHIFT 6 + #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) + + + #define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((u64)0x1 << (bit))); \ + } while (0) + + #define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((u64)0x1 << (bit)))); \ + } while (0) + + + #define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + + #define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + + #define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + + /* Creates a bitmask of all ones in less significant bits. 
+ idx - index of the most significant bit in the created mask */ + #define BIT_VEC64_ONES_MASK(idx) \ + (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) + #define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) + + /*******************************************************/ + + + + /* Number of u64 elements in SGE mask array */ + #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ + BIT_VEC64_ELEM_SZ) + #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) + #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) + + union host_hc_status_block { + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; + }; + + struct bnx2x_agg_info { + /* + * First aggregation buffer is an skb, the following - are pages. + * We will preallocate the skbs for each aggregation when + * we open the interface and will replace the BD at the consumer + * with this one when we receive the TPA_START CQE in order to + * keep the Rx BD ring consistent. + */ + struct sw_rx_bd first_buf; + u8 tpa_state; + #define BNX2X_TPA_START 1 + #define BNX2X_TPA_STOP 2 + #define BNX2X_TPA_ERROR 3 + u8 placement_offset; + u16 parsing_flags; + u16 vlan_tag; + u16 len_on_bd; + }; + + #define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + + struct bnx2x_fp_txdata { + + struct sw_tx_bd *tx_buf_ring; + + union eth_tx_bd_types *tx_desc_ring; + dma_addr_t tx_desc_mapping; + + u32 cid; + + union db_prod tx_db; + + u16 tx_pkt_prod; + u16 tx_pkt_cons; + u16 tx_bd_prod; + u16 tx_bd_cons; + + unsigned long tx_pkt; + + __le16 *tx_cons_sb; + + int txq_index; + }; + + struct bnx2x_fastpath { + struct bnx2x *bp; /* parent */ + + #define BNX2X_NAPI_WEIGHT 128 + struct napi_struct napi; + union host_hc_status_block status_blk; + /* chip independed shortcuts into sb structure */ + __le16 *sb_index_values; + __le16 *sb_running_index; + /* chip independed shortcut into rx_prods_offset memory */ + u32 ustorm_rx_prods_offset; + + u32 rx_buf_size; + + dma_addr_t status_blk_mapping; + + u8 max_cos; /* actual number of active tx coses */ + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; + + struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ + struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ + + struct eth_rx_bd *rx_desc_ring; + dma_addr_t rx_desc_mapping; + + union eth_rx_cqe *rx_comp_ring; + dma_addr_t rx_comp_mapping; + + /* SGE ring */ + struct eth_rx_sge *rx_sge_ring; + dma_addr_t rx_sge_mapping; + + u64 sge_mask[RX_SGE_MASK_LEN]; + + u32 cid; + + __le16 fp_hc_idx; + + u8 index; /* number in fp array */ + u8 cl_id; /* eth client id */ + u8 cl_qzone_id; + u8 fw_sb_id; /* status block number in FW */ + u8 igu_sb_id; /* status block number in HW */ + + u16 rx_bd_prod; + u16 rx_bd_cons; + u16 rx_comp_prod; + u16 rx_comp_cons; + u16 rx_sge_prod; + /* The last maximal completed SGE */ + u16 last_max_sge; + __le16 *rx_cons_sb; + unsigned long rx_pkt, + rx_calls; + + /* TPA related */ + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; + u8 disable_tpa; + #ifdef BNX2X_STOP_ON_ERROR + u64 tpa_queue_used; + #endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + + /* The size is calculated using the following: + sizeof name field from netdev structure + + 4 ('-Xx-' string) + + 4 (for the digits and to make it DWORD aligned) */ + #define FP_NAME_SIZE 
(sizeof(((struct net_device *)0)->name) + 8) + char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + + }; + + #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) + + /* Use 2500 as a mini-jumbo MTU for FCoE */ + #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + + /* FCoE L2 `fastpath' entry is right after the eth entries */ + #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) + #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) + #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) + #define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) + + + #define IS_ETH_FP(fp) (fp->index < \ + BNX2X_NUM_ETH_QUEUES(fp->bp)) + #ifdef BCM_CNIC + #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) + #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) + #else + #define IS_FCOE_FP(fp) false + #define IS_FCOE_IDX(idx) false + #endif + + + /* MC hsi */ + #define MAX_FETCH_BD 13 /* HW max BDs per packet */ + #define RX_COPY_THRESH 92 + + #define NUM_TX_RINGS 16 + #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) + #define NEXT_PAGE_TX_DESC_CNT 1 + #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) + #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) + #define MAX_TX_BD (NUM_TX_BD - 1) + #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) + #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ + (MAX_TX_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ + (x) + 1) + #define TX_BD(x) ((x) & MAX_TX_BD) + #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) + + /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ + #define NUM_RX_RINGS 8 + #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) + #define NEXT_PAGE_RX_DESC_CNT 2 + #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) + #define RX_DESC_MASK (RX_DESC_CNT - 1) + #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) + #define MAX_RX_BD (NUM_RX_BD - 1) + #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) + + /* dropless fc calculations for BDs + * + * Number of BDs should as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ + #define NUM_BD_REQ BRB_SIZE(bp) + #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ + MAX_RX_DESC_CNT) + #define BD_TH_LO(bp) (NUM_BD_REQ + \ + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ + FW_DROP_LEVEL(bp)) + #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) + + #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ + ETH_MIN_RX_CQES_WITH_TPA_E1 : \ + ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) + #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA + #define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) + #define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ + MIN_RX_AVAIL)) + + #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ + (MAX_RX_DESC_CNT - 1)) ? 
\ + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ + (x) + 1) + #define RX_BD(x) ((x) & MAX_RX_BD) + + /* + * As long as CQE is X times bigger than BD entry we have to allocate X times + * more pages for CQ ring in order to keep it balanced with BD ring + */ + #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) + #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) + #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) + #define NEXT_PAGE_RCQ_DESC_CNT 1 + #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) + #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) + #define MAX_RCQ_BD (NUM_RCQ_BD - 1) + #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) + #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ + (MAX_RCQ_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ + (x) + 1) + #define RCQ_BD(x) ((x) & MAX_RCQ_BD) + + /* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ + #define NUM_RCQ_REQ BRB_SIZE(bp) + #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ + MAX_RCQ_DESC_CNT) + #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ + FW_DROP_LEVEL(bp)) + #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + + /* This is needed for determining of last_max */ + #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) + #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) + + + #define BNX2X_SWCID_SHIFT 17 + #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + + /* used on a CID received from the HW */ + #define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) + #define CQE_CMD(x) (le32_to_cpu(x) >> \ + COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + + #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ + le32_to_cpu((bd)->addr_lo)) + #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + + #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ + #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ + #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) + #error "Min DB doorbell stride is 8" + #endif + #define DPM_TRIGER_TYPE 0x40 + #define DOORBELL(bp, cid, val) \ + do { \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ + DPM_TRIGER_TYPE); \ + } while (0) + + + /* TX CSUM helpers */ + #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ + skb->csum_offset) + #define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ + skb->csum_offset)) + + #define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) + + #define XMIT_PLAIN 0 + #define XMIT_CSUM_V4 0x1 + #define XMIT_CSUM_V6 0x2 + #define XMIT_CSUM_TCP 0x4 + #define XMIT_GSO_V4 0x8 + #define XMIT_GSO_V6 0x10 + + #define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) + #define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) + + + /* stuff added to make the code fit 80Col */ + #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) + #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) + #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) + #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) + #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + + #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG + + #define BNX2X_IP_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + 
ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) + + #define BNX2X_L4_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + + #define BNX2X_RX_CSUM_OK(cqe) \ + (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) + + #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ + (((le16_to_cpu(flags) & \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ + == PRS_FLAG_OVERETH_IPV4) + #define BNX2X_RX_SUM_FIX(cqe) \ + BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) + + + #define FP_USB_FUNC_OFF \ + offsetof(struct cstorm_status_block_u, func) + #define FP_CSB_FUNC_OFF \ + offsetof(struct cstorm_status_block_c, func) + + #define HC_INDEX_ETH_RX_CQ_CONS 1 + + #define HC_INDEX_OOO_TX_CQ_CONS 4 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 + + #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + + #define BNX2X_RX_SB_INDEX \ + (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) + + #define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 + + #define BNX2X_TX_SB_INDEX_COS0 \ + (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) + + /* end of fast path */ + + /* common */ + + struct bnx2x_common { + + u32 chip_id; + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + #define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) + + #define CHIP_NUM(bp) (bp->common.chip_id >> 16) + #define CHIP_NUM_57710 0x164e + #define CHIP_NUM_57711 0x164f + #define CHIP_NUM_57711E 0x1650 + #define CHIP_NUM_57712 0x1662 + #define CHIP_NUM_57712_MF 0x1663 + #define CHIP_NUM_57713 0x1651 + #define CHIP_NUM_57713E 0x1652 + #define CHIP_NUM_57800 0x168a + #define CHIP_NUM_57800_MF 0x16a5 + #define CHIP_NUM_57810 0x168e + #define CHIP_NUM_57810_MF 0x16ae + #define CHIP_NUM_57840 0x168d + #define CHIP_NUM_57840_MF 0x16ab + #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) + #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) + #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) + #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) + #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) + #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) + #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) + #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) + #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) + #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) + #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) + #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ + CHIP_IS_57711E(bp)) + #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ + CHIP_IS_57712_MF(bp)) + #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ + CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57810(bp) || \ + CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57840(bp) || \ + CHIP_IS_57840_MF(bp)) + #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) + #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) + #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) + + #define CHIP_REV_SHIFT 12 + #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) + #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) + #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) + #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) + /* assume maximum 5 revisions */ + #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) + /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, 
E=>6 */ + #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + !(CHIP_REV_VAL(bp) & 0x00001000)) + /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ + #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + (CHIP_REV_VAL(bp) & 0x00001000)) + + #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ + ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) + + #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) + #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) + #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ + (CHIP_REV_SHIFT + 1)) \ + << CHIP_REV_SHIFT) + #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ + CHIP_REV_SIM(bp) :\ + CHIP_REV_VAL(bp)) + #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Bx)) + #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Ax)) + + int flash_size; + #define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ + #define BNX2X_NVRAM_TIMEOUT_COUNT 30000 + #define BNX2X_NVRAM_PAGE_SIZE 256 + + u32 shmem_base; + u32 shmem2_base; + u32 mf_cfg_base; + u32 mf2_cfg_base; + + u32 hw_config; + + u32 bc_ver; + + u8 int_block; + #define INT_BLOCK_HC 0 + #define INT_BLOCK_IGU 1 + #define INT_BLOCK_MODE_NORMAL 0 + #define INT_BLOCK_MODE_BW_COMP 2 + #define CHIP_INT_MODE_IS_NBC(bp) \ + (!CHIP_IS_E1x(bp) && \ + !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) + #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) + + u8 chip_port_mode; + #define CHIP_4_PORT_MODE 0x0 + #define CHIP_2_PORT_MODE 0x1 + #define CHIP_PORT_MODE_NONE 0x2 + #define CHIP_MODE(bp) (bp->common.chip_port_mode) + #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) + }; + + /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ + #define BNX2X_IGU_STAS_MSG_VF_CNT 64 + #define BNX2X_IGU_STAS_MSG_PF_CNT 4 + + /* end of common */ + + /* port */ + + struct bnx2x_port { + u32 pmf; + + u32 link_config[LINK_CONFIG_SIZE]; + + u32 supported[LINK_CONFIG_SIZE]; + /* link settings - missing defines */ + #define SUPPORTED_2500baseX_Full (1 << 15) + + u32 advertising[LINK_CONFIG_SIZE]; + /* link settings - missing defines */ + #define ADVERTISED_2500baseX_Full (1 << 15) + + u32 phy_addr; + + /* used to synchronize phy accesses */ + struct mutex phy_mutex; + int need_hw_lock; + + u32 port_stx; + + struct nig_stats old_nig_stats; + }; + + /* end of port */ + + #define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + + /* slow path */ + + /* slow path work-queue */ + extern struct workqueue_struct *bnx2x_wq; + + #define BNX2X_MAX_NUM_OF_VFS 64 + #define BNX2X_VF_ID_INVALID 0xFF + + /* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * control by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume FP-SB reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. 
The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. the cid for the FCoE L2 queue is always X. + */ + + /* fast-path interrupt contexts E1x */ + #define FP_SB_MAX_E1x 16 + /* fast-path interrupt contexts E2 */ + #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + + union cdu_context { + struct eth_context eth; + char pad[1024]; + }; + + /* CDU host DB constants */ + #define CDU_ILT_PAGE_SZ_HW 3 + #define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ + #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + + #ifdef BCM_CNIC + #define CNIC_ISCSI_CID_MAX 256 + #define CNIC_FCOE_CID_MAX 2048 + #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) + #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) + #endif + + #define QM_ILT_PAGE_SZ_HW 0 + #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ + #define QM_CID_ROUND 1024 + + #ifdef BCM_CNIC + /* TM (timers) host DB constants */ + #define TM_ILT_PAGE_SZ_HW 0 + #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ + /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ + #define TM_CONN_NUM 1024 + #define TM_ILT_SZ (8 * TM_CONN_NUM) + #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + + /* SRC (Searcher) host DB constants */ + #define SRC_ILT_PAGE_SZ_HW 0 + #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ + #define SRC_HASH_BITS 10 + #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ + #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) + #define SRC_T2_SZ SRC_ILT_SZ + #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + + #endif + + #define MAX_DMAE_C 8 + + /* DMA memory not used in fastpath */ + struct bnx2x_slowpath { + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + /* Queue State related ramrods are always sent under rtnl_lock */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + union { + struct function_start_data func_start; + /* pfc configuration for DCBX ramrod */ + struct flow_control_configuration pfc_config; + } func_rdata; + + /* used by dmae command executer */ + struct dmae_command dmae[MAX_DMAE_C]; + + u32 stats_comp; + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + struct host_func_stats func_stats_base; + + u32 wb_comp; + u32 wb_data[4]; + }; + + #define bnx2x_sp(bp, var) (&bp->slowpath->var) + #define bnx2x_sp_mapping(bp, var) \ + (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) + + + /* attn group wiring */ + #define MAX_DYNAMIC_ATTN_GRPS 8 + + struct attn_route { + u32 sig[5]; + }; + + struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; + }; + + struct hw_context { + union cdu_context *vcxt; + dma_addr_t cxt_mapping; + size_t size; + }; + + /* forward */ + struct bnx2x_ilt; + + + enum bnx2x_recovery_state { + BNX2X_RECOVERY_DONE, + BNX2X_RECOVERY_INIT, + BNX2X_RECOVERY_WAIT, + BNX2X_RECOVERY_FAILED + 
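/* A worked example of the CNIC ILT sizing above, assuming struct
 * eth_context fits inside the 1024-byte pad so that
 * sizeof(union cdu_context) == 1024:
 *
 *   CDU_ILT_PAGE_SZ = 8192 << 3 = 65536 (64K)
 *   ILT_PAGE_CIDS   = 65536 / 1024 = 64 contexts per ILT line
 *   CNIC_CID_MAX    = 256 (iSCSI) + 2048 (FCoE) = 2304 CIDs
 *   CNIC_ILT_LINES  = DIV_ROUND_UP(2304, 64) = 36 ILT lines
 */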
}; + + /* + * Event queue (EQ or event ring) MC hsi + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 + */ + #define NUM_EQ_PAGES 1 + #define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) + #define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) + #define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) + #define EQ_DESC_MASK (NUM_EQ_DESC - 1) + #define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + + /* depends on EQ_DESC_CNT_PAGE being a power of 2 */ + #define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ + (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) + + /* depends on the above and on NUM_EQ_PAGES being a power of 2 */ + #define EQ_DESC(x) ((x) & EQ_DESC_MASK) + + #define BNX2X_EQ_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_EQ_CONS]) + + /* This is a data that will be used to create a link report message. + * We will keep the data used for the last link report in order + * to prevent reporting the same link parameters twice. + */ + struct bnx2x_link_report_data { + u16 line_speed; /* Effective line speed */ + unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ + }; + + enum { + BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON, + }; + + enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, + }; + + struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; + }; + + struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; + }; + + /* Public slow path states */ + enum { + BNX2X_SP_RTNL_SETUP_TC, + BNX2X_SP_RTNL_TX_TIMEOUT, + }; + + + struct bnx2x { + /* Fields used in the tx and intr/napi performance paths + * are grouped together in the beginning of the structure + */ + struct bnx2x_fastpath *fp; + void __iomem *regview; + void __iomem *doorbells; + u16 db_size; + + u8 pf_num; /* absolute PF number */ + u8 pfid; /* per-path PF number */ + int base_fw_ndsb; /**/ + #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) + #define BP_PORT(bp) (bp->pfid & 1) + #define BP_FUNC(bp) (bp->pfid) + #define BP_ABS_FUNC(bp) (bp->pf_num) + #define BP_VN(bp) ((bp)->pfid >> 1) + #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) + #define BP_L_ID(bp) (BP_VN(bp) << 2) + #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) + #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) + + struct net_device *dev; + struct pci_dev *pdev; + + const struct iro *iro_arr; + #define IRO (bp->iro_arr) + + enum bnx2x_recovery_state recovery_state; + int is_leader; + struct msix_entry *msix_table; + + int tx_ring_size; + + /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ + #define ETH_OVREHEAD (ETH_HLEN + 8 + 8) + #define ETH_MIN_PACKET_SIZE 60 + #define ETH_MAX_PACKET_SIZE 1500 + #define ETH_MAX_JUMBO_PACKET_SIZE 9600 + + /* Max supported alignment is 256 (8 shift) */ + #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? 
\ + L1_CACHE_SHIFT : 8) + /* FW use 2 Cache lines Alignment for start packet and size */ + #define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) + #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + + struct host_sp_status_block *def_status_blk; + #define DEF_SB_IGU_ID 16 + #define DEF_SB_ID HC_SP_SB_ID + __le16 def_idx; + __le16 def_att_idx; + u32 attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* slow path ring */ + struct eth_spe *spq; + dma_addr_t spq_mapping; + u16 spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + __le16 *dsb_sp_prod; + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ + /* used to synchronize spq accesses */ + spinlock_t spq_lock; + + /* event queue */ + union event_ring_elem *eq_ring; + dma_addr_t eq_mapping; + u16 eq_prod; + u16 eq_cons; + __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ + + + + /* Counter for marking that there is a STAT_QUERY ramrod pending */ + u16 stats_pending; + /* Counter for completed statistics ramrods */ + u16 stats_comp; + + /* End of fields used in the performance code paths */ + + int panic; + int msg_enable; + + u32 flags; + #define PCIX_FLAG (1 << 0) + #define PCI_32BIT_FLAG (1 << 1) + #define ONE_PORT_FLAG (1 << 2) + #define NO_WOL_FLAG (1 << 3) + #define USING_DAC_FLAG (1 << 4) + #define USING_MSIX_FLAG (1 << 5) + #define USING_MSI_FLAG (1 << 6) + #define DISABLE_MSI_FLAG (1 << 7) + #define TPA_ENABLE_FLAG (1 << 8) + #define NO_MCP_FLAG (1 << 9) + + #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) + #define MF_FUNC_DIS (1 << 11) + #define OWN_CNIC_IRQ (1 << 12) + #define NO_ISCSI_OOO_FLAG (1 << 13) + #define NO_ISCSI_FLAG (1 << 14) + #define NO_FCOE_FLAG (1 << 15) + + #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) + #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) + #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) + + int pm_cap; + int mrrs; + + struct delayed_work sp_task; + struct delayed_work sp_rtnl_task; + + struct delayed_work period_task; + struct timer_list timer; + int current_interval; + + u16 fw_seq; + u16 fw_drv_pulse_wr_seq; + u32 func_stx; + + struct link_params link_params; + struct link_vars link_vars; + u32 link_cnt; + struct bnx2x_link_report_data last_reported_link; + + struct mdio_if_info mdio; + + struct bnx2x_common common; + struct bnx2x_port port; + + struct cmng_struct_per_port cmng; + u32 vn_weight_sum; + u32 mf_config[E1HVN_MAX]; + u32 mf2_config[E2_FUNC_MAX]; + u32 path_has_ovlan; /* E3 */ + u16 mf_ov; + u8 mf_mode; + #define IS_MF(bp) (bp->mf_mode != 0) + #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) + #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) + + u8 wol; + + int rx_ring_size; + + u16 tx_quick_cons_trip_int; + u16 tx_quick_cons_trip; + u16 tx_ticks_int; + u16 tx_ticks; + + u16 rx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_ticks_int; + u16 rx_ticks; + /* Maximal coalescing timeout in us */ + #define BNX2X_MAX_COALESCE_TOUT (0xf0*12) + + u32 lin_cnt; + + u16 state; + #define BNX2X_STATE_CLOSED 0 + #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 + #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 + #define BNX2X_STATE_OPEN 0x3000 + #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 + #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 + + #define BNX2X_STATE_DIAG 0xe000 + #define BNX2X_STATE_ERROR 0xf000 + + int multi_mode; + #define BNX2X_MAX_PRIORITY 8 + #define BNX2X_MAX_ENTRIES_PER_PRI 16 + #define BNX2X_MAX_COS 3 + #define BNX2X_MAX_TX_COS 2 + int num_queues; + int disable_tpa; + + 
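/* A worked example of the RX alignment macros above, assuming the
 * common case of 64-byte cache lines (L1_CACHE_SHIFT == 6):
 *
 *   BNX2X_RX_ALIGN_SHIFT = 6 (stays below the 256-byte cap)
 *   BNX2X_FW_RX_ALIGN    = 2 << 6 = 128 bytes
 *
 * i.e. the two cache lines of alignment the firmware expects for the
 * packet start and size.
 */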
u32 rx_mode; + #define BNX2X_RX_MODE_NONE 0 + #define BNX2X_RX_MODE_NORMAL 1 + #define BNX2X_RX_MODE_ALLMULTI 2 + #define BNX2X_RX_MODE_PROMISC 3 + #define BNX2X_MAX_MULTICAST 64 + + u8 igu_dsb_id; + u8 igu_base_sb; + u8 igu_sb_cnt; + dma_addr_t def_status_blk_mapping; + + struct bnx2x_slowpath *slowpath; + dma_addr_t slowpath_mapping; + + /* Total number of FW statistics requests */ + u8 fw_stats_num; + + /* + * This is a memory buffer that will contain both statistics + * ramrod request and data. + */ + void *fw_stats; + dma_addr_t fw_stats_mapping; + + /* + * FW statistics request shortcut (points at the + * beginning of fw_stats buffer). + */ + struct bnx2x_fw_stats_req *fw_stats_req; + dma_addr_t fw_stats_req_mapping; + int fw_stats_req_sz; + + /* + * FW statistics data shortcut (points at the begining of + * fw_stats buffer + fw_stats_req_sz). + */ + struct bnx2x_fw_stats_data *fw_stats_data; + dma_addr_t fw_stats_data_mapping; + int fw_stats_data_sz; + + struct hw_context context; + + struct bnx2x_ilt *ilt; + #define BP_ILT(bp) ((bp)->ilt) + #define ILT_MAX_LINES 256 + /* + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes + * to CNIC. + */ + #define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) + + /* + * Maximum CID count that might be required by the bnx2x: + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) + */ + #define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) + #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ + ILT_PAGE_CIDS)) + #define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) + + int qm_cid_count; + + int dropless_fc; + + #ifdef BCM_CNIC + u32 cnic_flags; + #define BNX2X_CNIC_FLAG_MAC_SET 1 + void *t2; + dma_addr_t t2_mapping; + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; + u32 cnic_tag; + struct cnic_eth_dev cnic_eth_dev; + union host_hc_status_block cnic_sb; + dma_addr_t cnic_sb_mapping; + struct eth_spe *cnic_kwq; + struct eth_spe *cnic_kwq_prod; + struct eth_spe *cnic_kwq_cons; + struct eth_spe *cnic_kwq_last; + u16 cnic_kwq_pending; + u16 cnic_spq_pending; + u8 fip_mac[ETH_ALEN]; + struct mutex cnic_mutex; + struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; + + /* Start index of the "special" (CNIC related) L2 cleints */ + u8 cnic_base_cl_id; + #endif + + int dmae_ready; + /* used to synchronize dmae accesses */ + spinlock_t dmae_lock; + + /* used to protect the FW mail box */ + struct mutex fw_mb_mutex; + + /* used to synchronize stats collecting */ + int stats_state; + + /* used for synchronization of concurrent threads statistics handling */ + spinlock_t stats_lock; + + /* used by dmae command loader */ + struct dmae_command stats_dmae; + int executer_idx; + + u16 stats_counter; + struct bnx2x_eth_stats eth_stats; + + struct z_stream_s *strm; + void *gunzip_buf; + dma_addr_t gunzip_mapping; + int gunzip_outlen; + #define FW_BUF_SIZE 0x8000 + #define GUNZIP_BUF(bp) (bp->gunzip_buf) + #define GUNZIP_PHYS(bp) (bp->gunzip_mapping) + #define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) + + struct raw_op *init_ops; + /* Init blocks offsets inside init_ops */ + u16 *init_ops_offsets; + /* Data blob - has 32 bit granularity */ + u32 *init_data; + u32 init_mode_flags; + #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) + /* Zipped PRAM blobs - raw data */ + const u8 *tsem_int_table_data; + const u8 *tsem_pram_data; + const u8 *usem_int_table_data; + const u8 *usem_pram_data; + const u8 *xsem_int_table_data; + const 
u8 *xsem_pram_data; + const u8 *csem_int_table_data; + const u8 *csem_pram_data; + #define INIT_OPS(bp) (bp->init_ops) + #define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) + #define INIT_DATA(bp) (bp->init_data) + #define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) + #define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) + #define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) + #define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) + #define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) + #define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) + #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) + #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) + + #define PHY_FW_VER_LEN 20 + char fw_ver[32]; + const struct firmware *firmware; + + /* DCB support on/off */ + u16 dcb_state; + #define BNX2X_DCB_STATE_OFF 0 + #define BNX2X_DCB_STATE_ON 1 + + /* DCBX engine mode */ + int dcbx_enabled; + #define BNX2X_DCBX_ENABLED_OFF 0 + #define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 + #define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 + #define BNX2X_DCBX_ENABLED_INVALID (-1) + + bool dcbx_mode_uset; + + struct bnx2x_config_dcbx_params dcbx_config_params; + struct bnx2x_dcbx_port_params dcbx_port_params; + int dcb_version; + + /* CAM credit pools */ + struct bnx2x_credit_pool_obj macs_pool; + + /* RX_MODE object */ + struct bnx2x_rx_mode_obj rx_mode_obj; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* Function State controlling object */ + struct bnx2x_func_sp_obj func_obj; + + unsigned long sp_state; + + /* operation indication for the sp_rtnl task */ + unsigned long sp_rtnl_state; + + /* DCBX Negotation results */ + struct dcbx_features dcbx_local_feat; + u32 dcbx_error; + + #ifdef BCM_DCBNL + struct dcbx_features dcbx_remote_feat; + u32 dcbx_remote_flags; + #endif + u32 pending_max; + + /* multiple tx classes of service */ + u8 max_cos; + + /* priority to cos mapping */ + u8 prio_to_cos[8]; + }; + + /* Tx queues may be less or equal to Rx queues */ + extern int num_queues; + #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) + #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) + #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) + + #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) + + #define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) + /* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ + + #define RSS_IPV4_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY + + #define RSS_IPV4_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY + + #define RSS_IPV6_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY + + #define RSS_IPV6_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY + + /* func init flags */ + #define FUNC_FLG_RSS 0x0001 + #define FUNC_FLG_STATS 0x0002 + /* removed FUNC_FLG_UNMATCHED 0x0004 */ + #define FUNC_FLG_TPA 0x0008 + #define FUNC_FLG_SPQ 0x0010 + #define FUNC_FLG_LEADING 0x0020 /* PF only */ + + + struct bnx2x_func_init_params { + /* dma */ + dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ + dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + + u16 func_flgs; + u16 func_id; /* abs fid */ + u16 pf_id; + u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ + }; + + #define for_each_eth_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + + #define for_each_nondefault_eth_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); 
(var)++) + + #define for_each_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + + /* Skip forwarding FP */ + #define for_each_rx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + + /* Skip OOO FP */ + #define for_each_tx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + + #define for_each_nondefault_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + + #define for_each_cos_in_tx_queue(fp, var) \ + for ((var) = 0; (var) < (fp)->max_cos; (var)++) + + /* skip rx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ + #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + /* skip tx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ + #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + + + + /** + * bnx2x_set_mac_one - configure a single MAC address + * + * @bp: driver handle + * @mac: MAC to configure + * @obj: MAC object handle + * @set: if 'true' add a new MAC, otherwise - delete + * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) + * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) + * + * Configures one MAC according to provided parameters or continues the + * execution of previously scheduled commands if RAMROD_CONT is set in + * ramrod_flags. + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. + */ + int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags); + /** + * Deletes all MACs configured for the specific MAC object. + * + * @param bp Function driver instance + * @param mac_obj MAC object to cleanup + * + * @return zero if all MACs were cleaned + */ + + /** + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object + * + * @bp: driver handle + * @mac_obj: MAC object handle + * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) + * @wait_for_comp: if 'true' block until completion + * + * Deletes all MACs of the specific type (e.g. ETH, UC list). + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. 
+ */ + int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp); + + /* Init Function API */ + void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); + int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); + int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); + int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); + int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); + void bnx2x_read_mf_cfg(struct bnx2x *bp); + + + /* dmae */ + void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); + void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32); + void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); + u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); + u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); + u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type); + + + void bnx2x_calc_fc_adv(struct bnx2x *bp); + int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type); + void bnx2x_update_coalesce(struct bnx2x *bp); + int bnx2x_get_cur_phy_idx(struct bnx2x *bp); + + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, + int wait) + { + u32 val; + + do { + val = REG_RD(bp, reg); + if (val == expected) + break; + ms -= wait; + msleep(wait); + + } while (ms > 0); + + return val; + } + + #define BNX2X_ILT_ZALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + memset(x, 0, size); \ + } while (0) + + #define BNX2X_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + + #define ILOG2(x) (ilog2((x))) + + #define ILT_NUM_PAGE_ENTRIES (3072) + /* In 57710/11 we use whole table since we have 8 func + * In 57712 we have only 4 func, but use same size per func, then only half of + * the table in use + */ + #define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) + + #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) + /* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ + #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) + #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) + + /* load/unload mode */ + #define LOAD_NORMAL 0 + #define LOAD_OPEN 1 + #define LOAD_DIAG 2 + #define UNLOAD_NORMAL 0 + #define UNLOAD_CLOSE 1 + #define UNLOAD_RECOVERY 2 + + + /* DMAE command defines */ + #define DMAE_TIMEOUT -1 + #define DMAE_PCI_ERROR -2 /* E2 and onward */ + #define DMAE_NOT_RDY -3 + #define DMAE_PCI_ERR_FLAG 0x80000000 + + #define DMAE_SRC_PCI 0 + #define DMAE_SRC_GRC 1 + + #define DMAE_DST_NONE 0 + #define DMAE_DST_PCI 1 + #define DMAE_DST_GRC 2 + + #define DMAE_COMP_PCI 0 + #define DMAE_COMP_GRC 1 + + /* E2 and onward - PCI error handling in the completion */ + + #define DMAE_COMP_REGULAR 0 + #define DMAE_COM_SET_ERR 1 + + #define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ + DMAE_COMMAND_SRC_SHIFT) + #define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ + DMAE_COMMAND_SRC_SHIFT) + + #define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ + DMAE_COMMAND_DST_SHIFT) + #define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ + DMAE_COMMAND_DST_SHIFT) + + #define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ + DMAE_COMMAND_C_DST_SHIFT) + #define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC 
<< \ + DMAE_COMMAND_C_DST_SHIFT) + + #define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE + + #define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + + #define DMAE_CMD_PORT_0 0 + #define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + + #define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET + #define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET + #define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT + + #define DMAE_SRC_PF 0 + #define DMAE_SRC_VF 1 + + #define DMAE_DST_PF 0 + #define DMAE_DST_VF 1 + + #define DMAE_C_SRC 0 + #define DMAE_C_DST 1 + + #define DMAE_LEN32_RD_MAX 0x80 + #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) + + #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit + indicates eror */ + + #define MAX_DMAE_C_PER_PORT 8 + #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + BP_VN(bp)) + #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + E1HVN_MAX) + + /* PCIE link and speed */ + #define PCICFG_LINK_WIDTH 0x1f00000 + #define PCICFG_LINK_WIDTH_SHIFT 20 + #define PCICFG_LINK_SPEED 0xf0000 + #define PCICFG_LINK_SPEED_SHIFT 16 + + + #define BNX2X_NUM_TESTS 7 + + #define BNX2X_PHY_LOOPBACK 0 + #define BNX2X_MAC_LOOPBACK 1 + #define BNX2X_PHY_LOOPBACK_FAILED 1 + #define BNX2X_MAC_LOOPBACK_FAILED 2 + #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ + BNX2X_PHY_LOOPBACK_FAILED) + + + #define STROM_ASSERT_ARRAY_SIZE 50 + + + /* must be used on a CID before placing it on a HW ring */ + #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ + (x)) + + #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) + #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) + + + #define BNX2X_BTR 4 + #define MAX_SPQ_PENDING 8 + + /* CMNG constants, as derived from system spec calculations */ + /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ + #define DEF_MIN_RATE 100 + /* resolution of the rate shaping timer - 400 usec */ + #define RS_PERIODIC_TIMEOUT_USEC 400 + /* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ + #define QM_ARB_BYTES 160000 + /* resolution of Min algorithm 1:100 */ + #define MIN_RES 100 + /* how many bytes above threshold for the minimal credit of Min algorithm*/ + #define MIN_ABOVE_THRESH 32768 + /* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ + #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + /* Memory of fairness algorithm . 
2 cycles */ + #define FAIR_MEM 2 + + + #define ATTN_NIG_FOR_FUNC (1L << 8) + #define ATTN_SW_TIMER_4_FUNC (1L << 9) + #define GPIO_2_FUNC (1L << 10) + #define GPIO_3_FUNC (1L << 11) + #define GPIO_4_FUNC (1L << 12) + #define ATTN_GENERAL_ATTN_1 (1L << 13) + #define ATTN_GENERAL_ATTN_2 (1L << 14) + #define ATTN_GENERAL_ATTN_3 (1L << 15) + #define ATTN_GENERAL_ATTN_4 (1L << 13) + #define ATTN_GENERAL_ATTN_5 (1L << 14) + #define ATTN_GENERAL_ATTN_6 (1L << 15) + + #define ATTN_HARD_WIRED_MASK 0xff00 + #define ATTENTION_ID 4 + + + /* stuff added to make the code fit 80Col */ + + #define BNX2X_PMF_LINK_ASSERT \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) + + #define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + + #define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + + #define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) + #define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + + #define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) + #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) + #define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) + #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) + #define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) + #define 
HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + + #define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + + #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + + #define RSS_FLAGS(bp) \ + (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ + (bp->multi_mode << \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) + #define MULTI_MASK 0x7f + + + #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) + #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) + #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) + #define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) + + #define DEF_USB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_u, igu_index) + #define DEF_CSB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_c, igu_index) + #define DEF_XSB_IGU_INDEX_OFF \ + offsetof(struct xstorm_def_status_block, igu_index) + #define DEF_TSB_IGU_INDEX_OFF \ + offsetof(struct tstorm_def_status_block, igu_index) + + #define DEF_USB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_u, segment) + #define DEF_CSB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_c, segment) + #define DEF_XSB_SEGMENT_OFF \ + offsetof(struct xstorm_def_status_block, segment) + #define DEF_TSB_SEGMENT_OFF \ + offsetof(struct tstorm_def_status_block, segment) + + #define BNX2X_SP_DSB_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_DEF_CONS]) + + #define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + + #define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + + #define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + + #define CAM_IS_INVALID(x) \ + (GET_FLAG(x.flags, \ + MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ + (T_ETH_MAC_COMMAND_INVALIDATE)) + + /* Number of u32 elements in MC hash array */ + #define MC_HASH_SIZE 8 + #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) + + + #ifndef PXP2_REG_PXP2_INT_STS + #define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 + #endif + + #ifndef ETH_MAX_RX_CLIENTS_E2 + #define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H + #endif + + #define BNX2X_VPD_LEN 128 + #define VENDOR_ID_LEN 4 + + /* Congestion management fairness mode */ + #define CMNG_FNS_NONE 0 + #define CMNG_FNS_MINMAX 1 + + #define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ + #define HC_SEG_ACCESS_ATTN 4 + #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ + + static const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, 
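/* A sketch of the SET_FLAG()/GET_FLAG() token pasting above: the
 * caller passes only the mask name and the macro derives the shift by
 * appending _SHIFT. With the hypothetical pair
 *
 *   #define Q_TYPE_MASK        0x3
 *   #define Q_TYPE_MASK_SHIFT  0
 *
 * SET_FLAG(flags, Q_TYPE_MASK, 2) expands to
 *   (flags) &= ~(Q_TYPE_MASK); (flags) |= ((2) << (Q_TYPE_MASK_SHIFT));
 * and GET_FLAG(flags, Q_TYPE_MASK) reads those two bits back.
 */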
DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 + }; + + void bnx2x_set_ethtool_ops(struct net_device *netdev); + void bnx2x_notify_link_changed(struct bnx2x *bp); + #endif /* bnx2x.h */ diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0000000,5b1f9b5..283d663 mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@@ -1,0 -1,1491 +1,1491 @@@ + /* bnx2x_cmn.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein eilong@broadcom.com + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + #ifndef BNX2X_CMN_H + #define BNX2X_CMN_H + + #include <linux/types.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + + + #include "bnx2x.h" + + /* This is used as a replacement for an MCP if it's not present */ + extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ + + extern int num_queues; + + /************************ Macros ********************************/ + #define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + + #define BNX2X_FREE(x) \ + do { \ + if (x) { \ + kfree((void *)x); \ + x = NULL; \ + } \ + } while (0) + + #define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0, size); \ + } while (0) + + #define BNX2X_ALLOC(x, size) \ + do { \ + x = kzalloc(size, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + } while (0) + + /*********************** Interfaces **************************** + * Functions that need to be implemented by each driver version + */ + /* Init */ + + /** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ + u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); + + /** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + */ + void bnx2x_send_unload_done(struct bnx2x *bp); + + /** + * bnx2x_config_rss_pf - configure RSS parameters. + * + * @bp: driver handle + * @ind_table: indirection table to configure + * @config_hash: re-configure RSS hash keys configuration + */ + int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); + + /** + * bnx2x__init_func_obj - init function object + * + * @bp: driver handle + * + * Initializes the Function Object with the appropriate + * parameters which include a function slow path driver + * interface. + */ + void bnx2x__init_func_obj(struct bnx2x *bp); + + /** + * bnx2x_setup_queue - setup eth queue. 
+ * + * @bp: driver handle + * @fp: pointer to the fastpath structure + * @leading: boolean + * + */ + int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading); + + /** + * bnx2x_setup_leading - bring up a leading eth queue. + * + * @bp: driver handle + */ + int bnx2x_setup_leading(struct bnx2x *bp); + + /** + * bnx2x_fw_command - send the MCP a request + * + * @bp: driver handle + * @command: request + * @param: request's parameter + * + * block until there is a reply + */ + u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); + + /** + * bnx2x_initial_phy_init - initialize link parameters structure variables. + * + * @bp: driver handle + * @load_mode: current mode + */ + u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); + + /** + * bnx2x_link_set - configure hw according to link parameters structure. + * + * @bp: driver handle + */ + void bnx2x_link_set(struct bnx2x *bp); + + /** + * bnx2x_link_test - query link status. + * + * @bp: driver handle + * @is_serdes: bool + * + * Returns 0 if link is UP. + */ + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); + + /** + * bnx2x_drv_pulse - write driver pulse to shmem + * + * @bp: driver handle + * + * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox + * in the shmem. + */ + void bnx2x_drv_pulse(struct bnx2x *bp); + + /** + * bnx2x_igu_ack_sb - update IGU with current SB value + * + * @bp: driver handle + * @igu_sb_id: SB id + * @segment: SB segment + * @index: SB index + * @op: SB operation + * @update: is HW update required + */ + void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update); + + /* Disable transactions from chip to host */ + void bnx2x_pf_disable(struct bnx2x *bp); + + /** + * bnx2x__link_status_update - handles link status change. + * + * @bp: driver handle + */ + void bnx2x__link_status_update(struct bnx2x *bp); + + /** + * bnx2x_link_report - report link status to upper layer. + * + * @bp: driver handle + */ + void bnx2x_link_report(struct bnx2x *bp); + + /* None-atomic version of bnx2x_link_report() */ + void __bnx2x_link_report(struct bnx2x *bp); + + /** + * bnx2x_get_mf_speed - calculate MF speed. + * + * @bp: driver handle + * + * Takes into account current linespeed and MF configuration. + */ + u16 bnx2x_get_mf_speed(struct bnx2x *bp); + + /** + * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ + irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); + + /** + * bnx2x_interrupt - non MSI-X interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ + irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); + #ifdef BCM_CNIC + + /** + * bnx2x_cnic_notify - send command to cnic driver + * + * @bp: driver handle + * @cmd: command + */ + int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); + + /** + * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information + * + * @bp: driver handle + */ + void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); + #endif + + /** + * bnx2x_int_enable - enable HW interrupts. + * + * @bp: driver handle + */ + void bnx2x_int_enable(struct bnx2x *bp); + + /** + * bnx2x_int_disable_sync - disable interrupts. + * + * @bp: driver handle + * @disable_hw: true, disable HW interrupts. + * + * This function ensures that there are no + * ISRs or SP DPCs (sp_task) are running after it returns. 
+ */ + void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); + + /** + * bnx2x_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - rings + * - status blocks + * - etc. + */ + void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); + + /** + * bnx2x_alloc_mem - allocate driver's memory. + * + * @bp: driver handle + */ + int bnx2x_alloc_mem(struct bnx2x *bp); + + /** + * bnx2x_free_mem - release driver's memory. + * + * @bp: driver handle + */ + void bnx2x_free_mem(struct bnx2x *bp); + + /** + * bnx2x_set_num_queues - set number of queues according to mode. + * + * @bp: driver handle + */ + void bnx2x_set_num_queues(struct bnx2x *bp); + + /** + * bnx2x_chip_cleanup - cleanup chip internals. + * + * @bp: driver handle + * @unload_mode: COMMON, PORT, FUNCTION + * + * - Cleanup MAC configuration. + * - Closes clients. + * - etc. + */ + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); + + /** + * bnx2x_acquire_hw_lock - acquire HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ + int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); + + /** + * bnx2x_release_hw_lock - release HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ + int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); + + /** + * bnx2x_release_leader_lock - release recovery leader lock + * + * @bp: driver handle + */ + int bnx2x_release_leader_lock(struct bnx2x *bp); + + /** + * bnx2x_set_eth_mac - configure eth MAC address in the HW + * + * @bp: driver handle + * @set: set or clear + * + * Configures according to the value in netdev->dev_addr. + */ + int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); + + /** + * bnx2x_set_rx_mode - set MAC filtering configurations. + * + * @dev: netdevice + * + * called with netif_tx_lock from dev_mcast.c + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh() + */ + void bnx2x_set_rx_mode(struct net_device *dev); + + /** + * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. + * + * @bp: driver handle + * + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh(). + */ + void bnx2x_set_storm_rx_mode(struct bnx2x *bp); + + /** + * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. + * + * @bp: driver handle + * @cl_id: client id + * @rx_mode_flags: rx mode configuration + * @rx_accept_flags: rx accept configuration + * @tx_accept_flags: tx accept configuration (tx switch) + * @ramrod_flags: ramrod configuration + */ + void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags); + + /* Parity errors related */ + void bnx2x_inc_load_cnt(struct bnx2x *bp); + u32 bnx2x_dec_load_cnt(struct bnx2x *bp); + bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); + bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); + void bnx2x_set_reset_in_progress(struct bnx2x *bp); + void bnx2x_set_reset_global(struct bnx2x *bp); + void bnx2x_disable_close_the_gate(struct bnx2x *bp); + + /** + * bnx2x_sp_event - handle ramrods completion. + * + * @fp: fastpath handle for the event + * @rr_cqe: eth_rx_cqe + */ + void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + + /** + * bnx2x_ilt_set_info - prepare ILT configurations. 
+ * + * @bp: driver handle + */ + void bnx2x_ilt_set_info(struct bnx2x *bp); + + /** + * bnx2x_dcbx_init - initialize dcbx protocol. + * + * @bp: driver handle + */ + void bnx2x_dcbx_init(struct bnx2x *bp); + + /** + * bnx2x_set_power_state - set power state to the requested value. + * + * @bp: driver handle + * @state: required state D0 or D3hot + * + * Currently only D0 and D3hot are supported. + */ + int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + + /** + * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. + * + * @bp: driver handle + * @value: new value + */ + void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); + /* Error handling */ + void bnx2x_panic_dump(struct bnx2x *bp); + + void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); + + /* dev_close main block */ + int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); + + /* dev_open main block */ + int bnx2x_nic_load(struct bnx2x *bp, int load_mode); + + /* hard_xmit callback */ + netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + + /* setup_tc callback */ + int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); + + /* select_queue callback */ + u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); + + /* reload helper */ + int bnx2x_reload_if_running(struct net_device *dev); + + int bnx2x_change_mac_addr(struct net_device *dev, void *p); + + /* NAPI poll Rx part */ + int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); + + void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); + + /* NAPI poll Tx part */ + int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); + + /* suspend/resume callbacks */ + int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); + int bnx2x_resume(struct pci_dev *pdev); + + /* Release IRQ vectors */ + void bnx2x_free_irq(struct bnx2x *bp); + + void bnx2x_free_fp_mem(struct bnx2x *bp); + int bnx2x_alloc_fp_mem(struct bnx2x *bp); + void bnx2x_init_rx_rings(struct bnx2x *bp); + void bnx2x_free_skbs(struct bnx2x *bp); + void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); + void bnx2x_netif_start(struct bnx2x *bp); + + /** + * bnx2x_enable_msix - set msix configuration. + * + * @bp: driver handle + * + * fills msix_table, requests vectors, updates num_queues + * according to number of available vectors. 
+ */ + int bnx2x_enable_msix(struct bnx2x *bp); + + /** + * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly + * + * @bp: driver handle + */ + int bnx2x_enable_msi(struct bnx2x *bp); + + /** + * bnx2x_poll - NAPI callback + * + * @napi: napi structure + * @budget: + * + */ + int bnx2x_poll(struct napi_struct *napi, int budget); + + /** + * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure + * + * @bp: driver handle + */ + int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); + + /** + * bnx2x_free_mem_bp - release memories outsize main driver structure + * + * @bp: driver handle + */ + void bnx2x_free_mem_bp(struct bnx2x *bp); + + /** + * bnx2x_change_mtu - change mtu netdev callback + * + * @dev: net device + * @new_mtu: requested mtu + * + */ + int bnx2x_change_mtu(struct net_device *dev, int new_mtu); + + #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) + /** + * bnx2x_fcoe_get_wwn - return the requested WWN value for this port + * + * @dev: net_device + * @wwn: output buffer + * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) + * + */ + int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); + #endif + u32 bnx2x_fix_features(struct net_device *dev, u32 features); + int bnx2x_set_features(struct net_device *dev, u32 features); + + /** + * bnx2x_tx_timeout - tx timeout netdev callback + * + * @dev: net device + */ + void bnx2x_tx_timeout(struct net_device *dev); + + /*********************** Inlines **********************************/ + /*********************** Fast path ********************************/ + static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) + { + barrier(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; + } + + static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 bd_prod, + u16 rx_comp_prod, u16 rx_sge_prod, u32 start) + { + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. 
+ */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); + } + + static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, + u8 segment, u16 index, u8 op, + u8 update, u32 igu_addr) + { + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr); + REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); + } + + static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, + u8 idu_sb_id, bool is_Pf) + { + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | + ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } + } + + static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) + { + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); + } + + static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, + u16 index, u8 op, u8 update) + { + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); + else { + u8 segment; + + if (CHIP_INT_MODE_IS_BC(bp)) + segment = storm; + else if (igu_sb_id != bp->igu_dsb_id) + segment = IGU_SEG_ACCESS_DEF; + else if (storm == ATTENTION_ID) + segment = IGU_SEG_ACCESS_ATTN; + else + segment = IGU_SEG_ACCESS_DEF; + bnx2x_igu_ack_sb(bp, igu_sb_id, 
segment, index, op, update); + } + } + + static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) + { + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + + barrier(); + return result; + } + + static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) + { + u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); + u32 result = REG_RD(bp, igu_addr); + + DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", + result, igu_addr); + + barrier(); + return result; + } + + static inline u16 bnx2x_ack_int(struct bnx2x *bp) + { + barrier(); + if (bp->common.int_block == INT_BLOCK_HC) + return bnx2x_hc_ack_int(bp); + else + return bnx2x_igu_ack_int(bp); + } + + static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) + { + /* Tell compiler that consumer and producer can change */ + barrier(); + return txdata->tx_pkt_prod != txdata->tx_pkt_cons; + } + + static inline u16 bnx2x_tx_avail(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) + { + s16 used; + u16 prod; + u16 cons; + + prod = txdata->tx_bd_prod; + cons = txdata->tx_bd_cons; + + /* NUM_TX_RINGS = number of "next-page" entries + It will be used as a threshold */ + used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + + #ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > bp->tx_ring_size); + WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); + #endif + + return (s16)(bp->tx_ring_size) - used; + } + + static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) + { + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + return hw_cons != txdata->tx_pkt_cons; + } + + static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) + { + u8 cos; + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + return true; + return false; + } + + static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) + { + u16 rx_cons_sb; + + /* Tell compiler that status block fields can change */ + barrier(); + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + return (fp->rx_comp_cons != rx_cons_sb); + } + + /** + * bnx2x_tx_disable - disables tx from stack point of view + * + * @bp: driver handle + */ + static inline void bnx2x_tx_disable(struct bnx2x *bp) + { + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); + } + + static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; + } + + static inline void bnx2x_add_all_napi(struct bnx2x *bp) + { + int i; + + /* Add NAPI objects */ + for_each_rx_queue(bp, i) + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, BNX2X_NAPI_WEIGHT); + } + + static inline void bnx2x_del_all_napi(struct bnx2x *bp) + { + int i; + + for_each_rx_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); + } + + static inline void bnx2x_disable_msi(struct bnx2x *bp) + { + if (bp->flags & USING_MSIX_FLAG) { + 
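/* MSI-X was in use; release its vectors before clearing the flag */ +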
pci_disable_msix(bp->pdev); + bp->flags &= ~USING_MSIX_FLAG; + } else if (bp->flags & USING_MSI_FLAG) { + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + } + } + + static inline int bnx2x_calc_num_queues(struct bnx2x *bp) + { + return num_queues ? + min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : + min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); + } + + static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) + { + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); + idx--; + } + } + } + + static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) + { + /* Set the mask to all ones: it's faster to compare to 0 than to 0xf's */ + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); + + /* Clear the last two indices in each page: + these are the indices that correspond to the "next" element, + hence they will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) + return -ENOMEM; + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; + } + + static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct sk_buff *skb; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + if (unlikely(skb == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + rx_buf->skb = skb; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; + } + + /* note that we are not allocating a new skb, + * we are just moving one from cons to prod, + * and we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). + */ + static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, + u16 cons, u16 prod) + { + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->skb = cons_rx_buf->skb; + *prod_bd = *cons_bd; + } + + /************************* Init ******************************************/ + + /** + * bnx2x_func_start - init function + * + * @bp: driver handle + * + * Must be called before sending CLIENT_SETUP for the first client. 
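+ * RAMROD_COMP_WAIT is set below, so the call also waits for the + * firmware to complete the START ramrod before returning.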
+ */ + static inline int bnx2x_func_start(struct bnx2x *bp) + { + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = bp->mf_mode; + start_params->sd_vlan_tag = bp->mf_ov; + if (CHIP_IS_E1x(bp)) + start_params->network_cos_mode = OVERRIDE_COS; + else + start_params->network_cos_mode = STATIC_COS; + + return bnx2x_func_state_change(bp, &func_params); + } + + + /** + * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format + * + * @fw_hi: pointer to upper part + * @fw_mid: pointer to middle part + * @fw_lo: pointer to lower part + * @mac: pointer to MAC address + */ + static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, + u8 *mac) + { + ((u8 *)fw_hi)[0] = mac[1]; + ((u8 *)fw_hi)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lo)[0] = mac[5]; + ((u8 *)fw_lo)[1] = mac[4]; + } + + static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) + { + int i; + + if (fp->disable_tpa) + return; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); + } + + static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) + { + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + struct sk_buff *skb = first_buf->skb; + + if (skb == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + first_buf->skb = NULL; + } + } + + static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) + { + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; + } + + static inline void bnx2x_init_tx_rings(struct bnx2x *bp) + { + int i; + u8 cos; + + for_each_tx_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); + } + + static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) + { + int i; + + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } + } + + static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) + { + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 
2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } + } + + static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) + { + int i; + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } + } + + /* Returns the number of actually allocated BDs */ + static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) + { + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during function init, so + * fp->eth_q_stats.rx_skb_alloc_failed is still 0 here + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + fp->eth_q_stats.rx_skb_alloc_failed++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); + } + + if (fp->eth_q_stats.rx_skb_alloc_failed) + BNX2X_ERR("was only able to allocate " + "%d rx skbs on queue[%d]\n", + (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + return i - fp->eth_q_stats.rx_skb_alloc_failed; + } + + /* Statistics IDs are global per chip/path, while Client IDs for E1x are per + * port. + */ + static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) + { + if (!CHIP_IS_E1x(fp->bp)) + return fp->cl_id; + else + return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; + } + + static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, + bnx2x_obj_type obj_type) + { + struct bnx2x *bp = fp->bp; + + /* Configure classification DBs */ + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, obj_type, + &bp->macs_pool); + } + + /** + * bnx2x_get_path_func_num - get number of active functions + * + * @bp: driver handle + * + * Calculates the number of active (not hidden) functions on the + * current path. + */ + static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) + { + u8 func_num = 0, i; + + /* 57710 has only one function per-port */ + if (CHIP_IS_E1(bp)) + return 1; + + /* Calculate the number of functions enabled on the current + * PATH/PORT. + */ + if (CHIP_REV_IS_SLOW(bp)) { + if (IS_MF(bp)) + func_num = 4; + else + func_num = 2; + } else { + for (i = 0; i < E1H_FUNC_MAX / 2; i++) { + u32 func_config = + MF_CFG_RD(bp, + func_mf_config[BP_PORT(bp) + 2 * i]. + config); + func_num += + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); + } + } + + WARN_ON(!func_num); + + return func_num; + } + + static inline void bnx2x_init_bp_objs(struct bnx2x *bp) + { + /* RX_MODE controlling object */ + bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, + BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, mcast_rdata), + bnx2x_sp_mapping(bp, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + + /* Setup CAM credit pools */ + bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), + bnx2x_get_path_func_num(bp)); + + /* RSS configuration object */ + bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, + bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, rss_rdata), + bnx2x_sp_mapping(bp, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + } + + static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) + { + if (CHIP_IS_E1x(fp->bp)) + return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; + else + return fp->cl_id; + } + + static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) + { + struct bnx2x *bp = fp->bp; + + if (!CHIP_IS_E1x(bp)) + return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + } + + static inline void bnx2x_init_txdata(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) + { + txdata->cid = cid; + txdata->txq_index = txq_index; + txdata->tx_cons_sb = tx_cons_sb; + + DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n", + txdata->cid, txdata->txq_index); + } + + #ifdef BCM_CNIC + static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) + { + return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; ++ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; + } + + static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) + { + + /* the 'first' id is allocated for the cnic */ + return bp->base_fw_ndsb; + } + + static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) + { + return bp->igu_base_sb; + } + + + static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) + { + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + /* Current BNX2X_FCOE_ETH_CID definition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!! 
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); + + DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " + "igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); + } + #endif + + static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) + { + int cnt = 1000; + + while (bnx2x_has_tx_work_unload(txdata)) { + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]: " + "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + txdata->txq_index, txdata->tx_pkt_prod, + txdata->tx_pkt_cons); + #ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; + #else + break; + #endif + } + cnt--; + usleep_range(1000, 1000); + } + + return 0; + } + + int bnx2x_get_link_cfg_idx(struct bnx2x *bp); + + static inline void __storm_memset_struct(struct bnx2x *bp, + u32 addr, size_t size, u32 *data) + { + int i; + for (i = 0; i < size/4; i++) + REG_WR(bp, addr + (i * 4), data[i]); + } + + static inline void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) + { + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); + } + + static inline void storm_memset_cmng(struct bnx2x *bp, + struct cmng_struct_per_port *cmng, + u8 port) + { + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)cmng); + } + + /** + * bnx2x_wait_sp_comp - wait for the outstanding SP commands. + * + * @bp: driver handle + * @mask: bits that need to be cleared + */ + static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) + { + int tout = 5000; /* Wait for 5 secs tops */ + + while (tout--) { + smp_mb(); + netif_addr_lock_bh(bp->dev); + if (!(bp->sp_state & mask)) { + netif_addr_unlock_bh(bp->dev); + return true; + } + netif_addr_unlock_bh(bp->dev); + + usleep_range(1000, 1000); + } + + smp_mb(); + + netif_addr_lock_bh(bp->dev); + if (bp->sp_state & mask) { + BNX2X_ERR("Filtering completion timed out. 
sp_state 0x%lx, " + "mask 0x%lx\n", bp->sp_state, mask); + netif_addr_unlock_bh(bp->dev); + return false; + } + netif_addr_unlock_bh(bp->dev); + + return true; + } + + /** + * bnx2x_set_ctx_validation - set CDU context validation values + * + * @bp: driver handle + * @cxt: context of the connection on the host memory + * @cid: SW CID of the connection to be configured + */ + void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid); + + void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec); + void bnx2x_acquire_phy_lock(struct bnx2x *bp); + void bnx2x_release_phy_lock(struct bnx2x *bp); + + /** + * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. + * + * @bp: driver handle + * @mf_cfg: MF configuration + * + */ + static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) + { + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + DP(NETIF_MSG_LINK, + "Max BW configured to 0 - using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; + } + + #endif /* BNX2X_CMN_H */ diff --combined drivers/net/ethernet/broadcom/tg3.c index 0000000,b89027c..b865e9f mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -1,0 -1,15951 +1,15951 @@@ + /* + * tg3.c: Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2005-2011 Broadcom Corporation. + * + * Firmware is: + * Derived from proprietary unpublished source code, + * Copyright (C) 2000-2003 Broadcom Corporation. + * + * Permission is hereby granted for the distribution of this firmware + * data in hexadecimal or equivalent format, provided this copyright + * notice is accompanying it. 
+ */ + + + #include <linux/module.h> + #include <linux/moduleparam.h> + #include <linux/stringify.h> + #include <linux/kernel.h> + #include <linux/types.h> + #include <linux/compiler.h> + #include <linux/slab.h> + #include <linux/delay.h> + #include <linux/in.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/ioport.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/skbuff.h> + #include <linux/ethtool.h> + #include <linux/mdio.h> + #include <linux/mii.h> + #include <linux/phy.h> + #include <linux/brcmphy.h> + #include <linux/if_vlan.h> + #include <linux/ip.h> + #include <linux/tcp.h> + #include <linux/workqueue.h> + #include <linux/prefetch.h> + #include <linux/dma-mapping.h> + #include <linux/firmware.h> + + #include <net/checksum.h> + #include <net/ip.h> + + #include <asm/system.h> + #include <linux/io.h> + #include <asm/byteorder.h> + #include <linux/uaccess.h> + + #ifdef CONFIG_SPARC + #include <asm/idprom.h> + #include <asm/prom.h> + #endif + + #define BAR_0 0 + #define BAR_2 2 + + #include "tg3.h" + + /* Functions & macros to verify TG3_FLAGS types */ + + static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) + { + return test_bit(flag, bits); + } + + static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) + { + set_bit(flag, bits); + } + + static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) + { + clear_bit(flag, bits); + } + + #define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) + #define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) + #define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + + #define DRV_MODULE_NAME "tg3" + #define TG3_MAJ_NUM 3 + #define TG3_MIN_NUM 120 + #define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) + #define DRV_MODULE_RELDATE "August 18, 2011" + + #define RESET_KIND_SHUTDOWN 0 + #define RESET_KIND_INIT 1 + #define RESET_KIND_SUSPEND 2 + + #define TG3_DEF_RX_MODE 0 + #define TG3_DEF_TX_MODE 0 + #define TG3_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + + #define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + + /* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ + + #define TG3_TX_TIMEOUT (5 * HZ) + + /* hardware minimum and maximum for a single frame's data payload */ + #define TG3_MIN_MTU 60 + #define TG3_MAX_MTU(tp) \ + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) + + /* These numbers seem to be hard coded in the NIC firmware somehow. + * You can't change the ring sizes, but you can change where you place + * them in the NIC onboard memory. + */ + #define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) + #define TG3_DEF_RX_RING_PENDING 200 + #define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) + #define TG3_DEF_RX_JUMBO_RING_PENDING 100 + #define TG3_RSS_INDIR_TBL_SIZE 128 + + /* Do not place this n-ring entries value into the tp struct itself, + * we really want to expose these constants to GCC so that modulo et + * al. operations are done with shifts and masks instead of with + * hw multiply/modulo instructions. 
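(For example, with TG3_TX_RING_SIZE fixed at 512, GCC can turn an unsigned 'x % TG3_TX_RING_SIZE' into 'x & 511'.)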
Another solution would be to + * replace things like '% foo' with '& (foo - 1)'. + */ + + #define TG3_TX_RING_SIZE 512 + #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) + + #define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) + #define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) + #define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) + #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ + TG3_TX_RING_SIZE) + #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) + + #define TG3_DMA_BYTE_ENAB 64 + + #define TG3_RX_STD_DMA_SZ 1536 + #define TG3_RX_JMB_DMA_SZ 9046 + + #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + + #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) + #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + + #define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) + + #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) + + /* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. + */ + #define TG3_RX_COPY_THRESHOLD 256 + #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD + #else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) + #endif + + #if (NET_IP_ALIGN != 0) + #define TG3_RX_OFFSET(tp) ((tp)->rx_offset) + #else + #define TG3_RX_OFFSET(tp) 0 + #endif + + /* minimum number of free TX descriptors required to wake up TX process */ + #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) + #define TG3_TX_BD_DMA_MAX 4096 + + #define TG3_RAW_IP_ALIGN 2 + + #define TG3_FW_UPDATE_TIMEOUT_SEC 5 + + #define FIRMWARE_TG3 "tigon/tg3.bin" + #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" + #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" + + static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + + MODULE_AUTHOR("David S. 
Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); + MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_MODULE_VERSION); + MODULE_FIRMWARE(FIRMWARE_TG3); + MODULE_FIRMWARE(FIRMWARE_TG3TSO); + MODULE_FIRMWARE(FIRMWARE_TG3TSO5); + + static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ + module_param(tg3_debug, int, 0); + MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); + + static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ + {} + }; + + MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); + + static const struct { + const char string[ETH_GSTRING_LEN]; + } ethtool_stats_keys[] = { + { "rx_octets" }, + { "rx_fragments" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "rx_fcs_errors" }, + { "rx_align_errors" }, + { "rx_xon_pause_rcvd" }, + { "rx_xoff_pause_rcvd" }, + { "rx_mac_ctrl_rcvd" }, + { "rx_xoff_entered" }, + { "rx_frame_too_long_errors" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_in_length_errors" }, + { "rx_out_length_errors" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + { "rx_1523_to_2047_octet_packets" }, + { "rx_2048_to_4095_octet_packets" }, + { "rx_4096_to_8191_octet_packets" }, + { "rx_8192_to_9022_octet_packets" }, + + { "tx_octets" }, + { "tx_collisions" }, + + { "tx_xon_sent" }, + { "tx_xoff_sent" }, + { "tx_flow_control" }, + { 
"tx_mac_errors" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + { "tx_deferred" }, + { "tx_excessive_collisions" }, + { "tx_late_collisions" }, + { "tx_collide_2times" }, + { "tx_collide_3times" }, + { "tx_collide_4times" }, + { "tx_collide_5times" }, + { "tx_collide_6times" }, + { "tx_collide_7times" }, + { "tx_collide_8times" }, + { "tx_collide_9times" }, + { "tx_collide_10times" }, + { "tx_collide_11times" }, + { "tx_collide_12times" }, + { "tx_collide_13times" }, + { "tx_collide_14times" }, + { "tx_collide_15times" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_carrier_sense_errors" }, + { "tx_discards" }, + { "tx_errors" }, + + { "dma_writeq_full" }, + { "dma_write_prioq_full" }, + { "rxbds_empty" }, + { "rx_discards" }, + { "rx_errors" }, + { "rx_threshold_hit" }, + + { "dma_readq_full" }, + { "dma_read_prioq_full" }, + { "tx_comp_queue_full" }, + + { "ring_set_send_prod_index" }, + { "ring_status_update" }, + { "nic_irqs" }, + { "nic_avoided_irqs" }, + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, + }; + + #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + + static const struct { + const char string[ETH_GSTRING_LEN]; + } ethtool_test_keys[] = { + { "nvram test (online) " }, + { "link test (online) " }, + { "register test (offline)" }, + { "memory test (offline)" }, + { "mac loopback test (offline)" }, + { "phy loopback test (offline)" }, + { "ext loopback test (offline)" }, + { "interrupt test (offline)" }, + }; + + #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + + static void tg3_write32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off); + } + + static u32 tg3_read32(struct tg3 *tp, u32 off) + { + return readl(tp->regs + off); + } + + static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->aperegs + off); + } + + static u32 tg3_ape_read32(struct tg3 *tp, u32 off) + { + return readl(tp->aperegs + off); + } + + static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off); + readl(tp->regs + off); + } + + static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) + { + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; + } + + static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == TG3_RX_STD_PROD_IDX_REG) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl 
register. + */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); + } + } + + static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) + { + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; + } + + /* usec_wait specifies the wait time in usec when writing to certain registers + * where it is unsafe to read back the register without some delay. + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. + */ + static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) + { + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) + /* Non-posted methods */ + tp->write32(tp, off, val); + else { + /* Posted method */ + tg3_write32(tp, off, val); + if (usec_wait) + udelay(usec_wait); + tp->read32(tp, off); + } + /* Wait again after the read for the posted method to guarantee that + * the wait time is met. + */ + if (usec_wait) + udelay(usec_wait); + } + + static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) + { + tp->write32_mbox(tp, off, val); + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) + tp->read32_mbox(tp, off); + } + + static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) + { + void __iomem *mbox = tp->regs + off; + writel(val, mbox); + if (tg3_flag(tp, TXD_MBOX_HWBUG)) + writel(val, mbox); + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + readl(mbox); + } + + static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) + { + return readl(tp->regs + off + GRCMBOX_BASE); + } + + static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off + GRCMBOX_BASE); + } + + #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) + #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) + #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) + #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) + #define tr32_mailbox(reg) tp->read32_mbox(tp, reg) + + #define tw32(reg, val) tp->write32(tp, reg, val) + #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) + #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) + #define tr32(reg) tp->read32(tp, reg) + + static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + tw32_f(TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. 
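(Presumably so later accesses can assume the window still maps NIC-local offset 0.)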
*/ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) + { + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + *val = tr32(TG3PCI_MEM_WIN_DATA); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_ape_lock_init(struct tg3 *tp) + { + int i; + u32 regbase, bit; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) { + if (i == TG3_APE_LOCK_GPIO) + continue; + tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); + } + + /* Clear the correct bit of the GPIO lock too. */ + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); + } + + static int tg3_ape_lock(struct tg3 *tp, int locknum) + { + int i, off; + int ret = 0; + u32 status, req, gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return 0; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, gnt + off); + if (status == bit) + break; + udelay(10); + } + + if (status != bit) { + /* Revoke the lock request. 
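Writing our bit back to the grant register clears the request posted above, so no stale request is left pending.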
*/ + tg3_ape_write32(tp, gnt + off, bit); + ret = -EBUSY; + } + + return ret; + } + + static void tg3_ape_unlock(struct tg3 *tp, int locknum) + { + u32 gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, gnt + 4 * locknum, bit); + } + + static void tg3_ape_send_event(struct tg3 *tp, u32 event) + { + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. */ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); + } + + static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) + { + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. 
+ */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); + } + + static void tg3_disable_ints(struct tg3 *tp) + { + int i; + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); + } + + static void tg3_enable_ints(struct tg3 *tp) + { + int i; + + tp->irq_sync = 0; + wmb(); + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); + + tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tg3_flag(tp, 1SHOT_MSI)) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + tp->coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!tg3_flag(tp, TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coal_now); + + tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); + } + + static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int work_exists = 0; + + /* check for phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + if (sblk->status & SD_STATUS_LINK_CHG) + work_exists = 1; + } + /* check for RX/TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_exists = 1; + + return work_exists; + } + + /* tg3_int_reenable + * similar to tg3_enable_ints, but it accurately determines whether there + * is new work pending and can return without flushing the PIO write + * which reenables interrupts + */ + static void tg3_int_reenable(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. 
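+ * If more work has arrived since that tag was recorded, the chip + * raises a new interrupt on its own, so nothing is missed.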
+ */ + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | tnapi->coal_now); + } + + static void tg3_switch_clocks(struct tg3 *tp) + { + u32 clock_ctrl; + u32 orig_clock_ctrl; + + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) + return; + + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + + orig_clock_ctrl = clock_ctrl; + clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | + CLOCK_CTRL_CLKRUN_OENABLE | + 0x1f); + tp->pci_clock_ctrl = clock_ctrl; + + if (tg3_flag(tp, 5705_PLUS)) { + if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | CLOCK_CTRL_625_CORE, 40); + } + } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | + (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), + 40); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | (CLOCK_CTRL_ALTCLK), + 40); + } + tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); + } + + #define PHY_BUSY_LOOPS 5000 + + static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) + { + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + *val = 0x0; + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (MI_COM_CMD_READ | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) { + *val = frame_val & MI_COM_DATA_MASK; + ret = 0; + } + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; + } + + static int tg3_writephy(struct tg3 *tp, int reg, u32 val) + { + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) + return 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (val & MI_COM_DATA_MASK); + frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) + ret = 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; + } + + static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + + done: + return err; + } + + static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) + { + int err; + + err = tg3_writephy(tp, 
MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + + done: + return err; + } + + static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; + } + + static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; + } + + static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; + } + + static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) + { + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); + } + + #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + MII_TG3_AUXCTL_ACTL_TX_6DB) + + #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + + static int tg3_bmcr_reset(struct tg3 *tp) + { + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. + */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; + } + + static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) + { + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (tg3_readphy(tp, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; + } + + static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) + { + struct tg3 *tp = bp->priv; + u32 ret = 0; + + spin_lock_bh(&tp->lock); + + if (tg3_writephy(tp, reg, val)) + ret = -EIO; + + spin_unlock_bh(&tp->lock); + + return ret; + } + + static int tg3_mdio_reset(struct mii_bus *bp) + { + return 0; + } + + static void tg3_mdio_config_5785(struct tg3 *tp) + { + u32 val; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } + + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } + + if 
(!tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); + } + + static void tg3_mdio_start(struct tg3 *tp) + { + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (tg3_flag(tp, MDIOBUS_INITED) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + } + + static int tg3_mdio_init(struct tg3 *tp) + { + int i; + u32 reg; + struct phy_device *phydev; + + if (tg3_flag(tp, 5717_PLUS)) { + u32 is_serdes; + + tp->phy_addr = tp->pci_fn + 1; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; + else + is_serdes = tr32(TG3_CPMU_PHY_STRAP) & + TG3_CPMU_PHY_STRAP_IS_SERDES; + if (is_serdes) + tp->phy_addr += 7; + } else + tp->phy_addr = TG3_PHY_MII_ADDR; + + tg3_mdio_start(tp); + + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) + return 0; + + tp->mdio_bus = mdiobus_alloc(); + if (tp->mdio_bus == NULL) + return -ENOMEM; + + tp->mdio_bus->name = "tg3 mdio bus"; + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (tp->pdev->bus->number << 8) | tp->pdev->devfn); + tp->mdio_bus->priv = tp; + tp->mdio_bus->parent = &tp->pdev->dev; + tp->mdio_bus->read = &tg3_mdio_read; + tp->mdio_bus->write = &tg3_mdio_write; + tp->mdio_bus->reset = &tg3_mdio_reset; + tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->irq = &tp->mdio_irq[0]; + + for (i = 0; i < PHY_MAX_ADDR; i++) + tp->mdio_bus->irq[i] = PHY_POLL; + + /* The bus registration will look for all the PHYs on the mdio bus. + * Unfortunately, it does not ensure the PHY is powered up before + * accessing the PHY ID registers. A chip reset is the + * quickest way to bring the device back to an operational state. 
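+ * (A PHY left powered down (BMCR_PDOWN) may otherwise return bogus + * ID register values.)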
+ */ + if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) + tg3_bmcr_reset(tp); + + i = mdiobus_register(tp->mdio_bus); + if (i) { + dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(tp->mdio_bus); + return i; + } + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (!phydev || !phydev->drv) { + dev_warn(&tp->pdev->dev, "No PHY devices\n"); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + return -ENODEV; + } + + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM57780: + phydev->interface = PHY_INTERFACE_MODE_GMII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + break; + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | + PHY_BRCM_RX_REFCLK_UNUSED | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_AUTO_PWRDWN_ENABLE; + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; + /* fallthru */ + case PHY_ID_RTL8211C: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_ID_RTL8201E: + case PHY_ID_BCMAC131: + phydev->interface = PHY_INTERFACE_MODE_MII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + tp->phy_flags |= TG3_PHYFLG_IS_FET; + break; + } + + tg3_flag_set(tp, MDIOBUS_INITED); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + + return 0; + } + + static void tg3_mdio_fini(struct tg3 *tp) + { + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + } + } + + /* tp->lock is held. */ + static inline void tg3_generate_fw_event(struct tg3 *tp) + { + u32 val; + + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); + + tp->last_event_jiffies = jiffies; + } + + #define TG3_FW_EVENT_TIMEOUT_USEC 2500 + + /* tp->lock is held. */ + static void tg3_wait_for_event_ack(struct tg3 *tp) + { + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. */ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(8); + } + }
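The wait function above is a reusable idiom: record a jiffies timestamp when the event is posted (tg3_generate_fw_event() updates last_event_jiffies), then on the next wait convert only the unexpired part of the timeout back into a microsecond polling budget before spinning. A minimal stand-alone sketch of the same idiom follows; the helper name, the done() callback, and MY_EVENT_TIMEOUT_USEC are hypothetical illustrations, not code from this patch:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_EVENT_TIMEOUT_USEC	2500	/* hypothetical budget, mirrors TG3_FW_EVENT_TIMEOUT_USEC */

static void my_bounded_wait(unsigned long last_event_jiffies, bool (*done)(void))
{
	unsigned int budget, i;
	long remain;

	/* If the full timeout has already elapsed, skip the wait entirely. */
	remain = (long)(last_event_jiffies + 1 +
			usecs_to_jiffies(MY_EVENT_TIMEOUT_USEC)) - (long)jiffies;
	if (remain < 0)
		return;

	/* Otherwise poll only for the time that is actually left. */
	budget = jiffies_to_usecs(remain);
	if (budget > MY_EVENT_TIMEOUT_USEC)
		budget = MY_EVENT_TIMEOUT_USEC;

	for (i = 0; i < (budget >> 3) + 1; i++) {	/* 8 us steps */
		if (done())
			break;
		udelay(8);
	}
}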
+ + /* tp->lock is held. */ + static void tg3_ump_link_report(struct tg3 *tp) + { + u32 reg; + u32 val; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + tg3_generate_fw_event(tp); + } + + /* tp->lock is held. */ + static void tg3_stop_fw(struct tg3 *tp) + { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } + } + + /* tp->lock is held. */ + static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) + { + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); + } + + /* tp->lock is held. */ + static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) + { + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); + } + + /* tp->lock is held. */ + static void tg3_write_sig_legacy(struct tg3 *tp, int kind) + { + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + } + + static int tg3_poll_fw(struct tg3 *tp) + { + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete.
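The bootcode acknowledges completion by writing back the one's complement of NIC_SRAM_FIRMWARE_MBOX_MAGIC1, which is exactly what the loop below polls for.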
*/ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; + } + + static void tg3_link_report(struct tg3 *tp) + { + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? "enabled" : "disabled"); + + tg3_ump_link_report(tp); + } + } + + static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) + { + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; + } + + static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) + { + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; + } + + static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) + { + u8 cap = 0; + + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; + } + + static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) + { + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tg3_flag(tp, USE_PHYLIB)) + autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + 
else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); + } + + static void tg3_adjust_link(struct net_device *dev) + { + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = tg3_advert_flowctrl_1000T( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + tp->mac_mode = mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || + (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); + } + + static int tg3_phy_init(struct tg3 *tp) + { + struct phy_device *phydev; + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, + phydev->dev_flags, phydev->interface); + if (IS_ERR(phydev)) { + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. 
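Pause and asym-pause bits are kept alongside the basic speed/duplex sets so flow-control autonegotiation still works; anything else the PHY claims to support is dropped.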
*/ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + return -EINVAL; + } + + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; + } + + static void tg3_phy_start(struct tg3 *tp) + { + struct phy_device *phydev; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + phydev->speed = tp->link_config.orig_speed; + phydev->duplex = tp->link_config.orig_duplex; + phydev->autoneg = tp->link_config.orig_autoneg; + phydev->advertising = tp->link_config.orig_advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); + } + + static void tg3_phy_stop(struct tg3 *tp) + { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } + + static void tg3_phy_fini(struct tg3 *tp) + { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; + } + } + + static int tg3_phy_set_extloopbk(struct tg3 *tp) + { + int err; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + return 0; + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + MII_TG3_AUXCTL_ACTL_EXTLOOPBK | + 0x4c20); + goto done; + } + + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (err) + return err; + + val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); + + done: + return err; + } + + static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) + { + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + } + + static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) + { + u32 reg; + + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_SCR5_SEL | + MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_APD_SEL | + MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + } + + static void tg3_phy_toggle_automdix(struct tg3 *tp, int 
enable) + { + u32 phy; + + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); + } + } + } + + static void tg3_phy_set_wirespeed(struct tg3 *tp) + { + int ret; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) + return; + + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); + } + + static void tg3_phy_apply_otp(struct tg3 *tp) + { + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) + { + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, MDIO_MMD_AN, + TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up == 1 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } + } + + static void tg3_phy_eee_enable(struct tg3 *tp) + { + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == 
ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); + } + + static int tg3_wait_macro_done(struct tg3 *tp) + { + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; + } + + static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) + { + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; + } + + static int tg3_phy_reset_chanpat(struct tg3 *tp) + { + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; + } + + static int tg3_phy_reset_5703_4_5(struct tg3 *tp) + { + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; + do_phy_reset = 1; + do { + if (do_phy_reset) { + err = tg3_bmcr_reset(tp); + if (err) + return err; + do_phy_reset = 0; + } + + /* Disable transmitter and interrupt. */ + if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) + continue; + + reg32 |= 0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + /* Set full-duplex, 1000 Mbps. */ + tg3_writephy(tp, MII_BMCR, + BMCR_FULLDPLX | BMCR_SPEED1000); + + /* Set to master mode. */ + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) + continue; + + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err;
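+ + /* With SMDSP access enabled, the code below writes a known test pattern to each of the four DSP channels and verifies the read-back; either failure mode makes the do-loop retry (up to the 10 attempts set above), and a macro-done timeout additionally forces a PHY reset on the next pass. */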
+ + /* Block the PHY control access. */ + tg3_phydsp_write(tp, 0x8005, 0x0800); + + err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); + if (!err) + break; + } while (--retries); + + err = tg3_phy_reset_chanpat(tp); + if (err) + return err; + + tg3_phydsp_write(tp, 0x8005, 0x0000); + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { + reg32 &= ~0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + } else if (!err) + err = -EBUSY; + + return err; + } + + /* This will reset the tigon3 PHY if there is no valid + * link unless the FORCE argument is non-zero. + */ + static int tg3_phy_reset(struct tg3 *tp) + { + u32 val, cpmuctrl; + int err; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + } + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); + if (err != 0) + return -EBUSY; + + if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + err = tg3_phy_reset_5703_4_5(tp); + if (err) + return err; + goto out; + } + + cpmuctrl = 0; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + cpmuctrl = tr32(TG3_CPMU_CTRL); + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); + } + + err = tg3_bmcr_reset(tp); + if (err) + return err; + + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + tw32(TG3_CPMU_CTRL, cpmuctrl); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == + CPMU_LSPD_1000MB_MACCLK_12_5) { + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + udelay(40); + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + } + + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; + + tg3_phy_apply_otp(tp); + + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else + tg3_phy_toggle_apd(tp, false); + + out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + +
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } + + /* Set Extended packet length bit (bit 14) on all chips that */ + /* support jumbo frames */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { + /* Set bit 14 with read-modify-write to preserve other bits */ + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); + } + + /* Set phy register 0x10 bit 0 to high fifo elasticity to support + * jumbo frames transmission. + */ + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); + } + + tg3_phy_toggle_automdix(tp, 1); + tg3_phy_set_wirespeed(tp); + return 0; + } + + #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 + #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 + #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) + #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + + #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + + static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) + { + u32 status, shift; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); + + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; + } + + static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) + { + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + return 0; + } + + static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) + { + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + 
TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) + { + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; + + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. */ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + } + } + + static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) + { + u32 msg = 0; + + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; + + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + + done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } + + static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) + { + bool need_vaux = false; + + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. 
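If so, pci_get_drvdata() returns NULL and the peer's WOL/ASF needs are simply not consulted.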
*/ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; + } + } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + } + + static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) + { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; + } + + static int tg3_setup_phy(struct tg3 *, int); + static int tg3_halt_cpu(struct tg3 *, u32); + + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) + { + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); + u32 serdes_cfg = tr32(MAC_SERDES_CFG); + + sg_dig_ctrl |= + SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; + tw32(SG_DIG_CTRL, sg_dig_ctrl); + tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); + } + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_bmcr_reset(tp); + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + return; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); + } + + /* The PHY should not be powered down on some chips because + * of bugs. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); + } + + /* tp->lock is held. */ + static int tg3_nvram_lock(struct tg3 *tp) + { + if (tg3_flag(tp, NVRAM)) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; + } + + /* tp->lock is held. 
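Lock and unlock calls nest via nvram_lock_cnt; the SWARB request is only dropped once the count returns to zero, so a balanced caller looks like: ret = tg3_nvram_lock(tp); if (!ret) { ... access NVRAM ...; tg3_nvram_unlock(tp); }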
*/ + static void tg3_nvram_unlock(struct tg3 *tp) + { + if (tg3_flag(tp, NVRAM)) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } + } + + /* tp->lock is held. */ + static void tg3_enable_nvram_access(struct tg3 *tp) + { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } + } + + /* tp->lock is held. */ + static void tg3_disable_nvram_access(struct tg3 *tp) + { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } + } + + static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) + { + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; + } + + #define NVRAM_CMD_TIMEOUT 10000 + + static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) + { + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + udelay(10); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; + } + + static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) + { + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; + } + + static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) + { + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; + } + + /* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. 
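Callers that need a fixed byte order independent of the host should use tg3_nvram_read_be32() below.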
+ */ + static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) + { + int ret; + + if (!tg3_flag(tp, NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; + } + + /* Ensures NVRAM data is in bytestream format. */ + static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) + { + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; + } + + #define RX_CPU_SCRATCH_BASE 0x30000 + #define RX_CPU_SCRATCH_SIZE 0x04000 + #define TX_CPU_SCRATCH_BASE 0x34000 + #define TX_CPU_SCRATCH_SIZE 0x04000 + + /* tp->lock is held. */ + static int tg3_halt_cpu(struct tg3 *tp, u32 offset) + { + int i; + + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } + + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; + } + + struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; + }; + + /* tp->lock is held. */ + static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) + { + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); + + err = 0; + + out: + return err; + } + + /* tp->lock is held. 
*/ + static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) + { + struct fw_info info; + const __be32 *fw_data; + int err, i; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. */ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; + } + + /* tp->lock is held. */ + static int tg3_load_tso_firmware(struct tg3 *tp) + { + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; + } + + + /* tp->lock is held. 
*/ + static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) + { + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); + } + + static void tg3_enable_register_access(struct tg3 *tp) + { + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); + } + + static int tg3_power_up(struct tg3 *tp) + { + int err; + + tg3_enable_register_access(tp); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; + } + + static int tg3_power_down_prepare(struct tg3 *tp) + { + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); + } + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } + + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + 
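/* Remember the active link settings in the orig_* fields so they can be restored when the device later comes out of low power (cf. tg3_phy_start() above for the phylib path). */ +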
tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + tp->link_config.speed = SPEED_10; + tp->link_config.duplex = DUPLEX_HALF; + tp->link_config.autoneg = AUTONEG_ENABLE; + tg3_setup_phy(tp, 0); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; + + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } + } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); + + if (device_should_wake) { + u32 mac_mode; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? + SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } + + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); + + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } + + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; + + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; + + if 
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } + } + + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); + + tg3_frob_aux_power(tp, true); + + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); + + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!tg3_flag(tp, ENABLE_ASF)) { + int err; + + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } + } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + return 0; + } + + static void tg3_power_down(struct tg3 *tp) + { + tg3_power_down_prepare(tp); + + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); + } + + static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) + { + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; + + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : + DUPLEX_HALF; + break; + } + *speed = SPEED_INVALID; + *duplex = DUPLEX_INVALID; + break; + } + } + + static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) + { + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; + + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; + + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000FULL; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... */ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } + + done: + return err; + } + + static void tg3_phy_copper_begin(struct tg3 *tp) + { + u32 new_adv; + int i; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_INVALID) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + } else { + /* Asking for a specific link mode. 
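Only the requested speed/duplex is advertised; when autonegotiation is disabled entirely, BMCR is forced to match further down.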
*/ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; + } + + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); + } + + if (tp->link_config.autoneg == AUTONEG_DISABLE && + tp->link_config.speed != SPEED_INVALID) { + u32 bmcr, orig_bmcr; + + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; + + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; + + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } + + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; + + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } else { + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + + static int tg3_init_5401phy_dsp(struct tg3 *tp) + { + int err; + + /* Turn off tap power management. */ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + + udelay(40); + + return err; + } + + static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) + { + u32 adv_reg, all_mask = 0; + + if (mask & ADVERTISED_10baseT_Half) + all_mask |= ADVERTISE_10HALF; + if (mask & ADVERTISED_10baseT_Full) + all_mask |= ADVERTISE_10FULL; + if (mask & ADVERTISED_100baseT_Half) + all_mask |= ADVERTISE_100HALF; + if (mask & ADVERTISED_100baseT_Full) + all_mask |= ADVERTISE_100FULL; + + if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) + return 0; + + if ((adv_reg & ADVERTISE_ALL) != all_mask) + return 0; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + all_mask = 0; + if (mask & ADVERTISED_1000baseT_Half) + all_mask |= ADVERTISE_1000HALF; + if (mask & ADVERTISED_1000baseT_Full) + all_mask |= ADVERTISE_1000FULL; + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return 0; + + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (tg3_ctrl != all_mask) + return 0; + } + + return 1; + } + + static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) + { + u32 curadv, reqadv; + + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return 1; + + curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + + if (tp->link_config.active_duplex == DUPLEX_FULL) { + if (curadv != reqadv) + return 0; + + if (tg3_flag(tp, PAUSE_AUTONEG)) + tg3_readphy(tp, MII_LPA, rmtadv); + } else { + /* Reprogram the advertisement register, even if it + * does not affect the current link. 
If the link + * gets renegotiated in the future, we can save an + * additional renegotiation cycle by advertising + * it correctly in the first place. + */ + if (curadv != reqadv) { + *lcladv &= ~(ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + } + } + + return 1; + } + + static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) + { + int current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + netif_carrier_ok(tp->dev)) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = 1; + } + if (force_reset) + tg3_phy_reset(tp); + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; + + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } + + /* Clear pending interrupts... 
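+ * (the interrupt status register is presumably clear-on-read, + * hence the double read to flush any stale link-change event)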
*/ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) + tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_writephy(tp, MII_TG3_IMASK, ~0); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + else + tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); + } + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); + goto relink; + } + } + + bmsr = 0; + for (i = 0; i < 100; i++) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + break; + udelay(40); + } + + if (bmsr & BMSR_LSTATUS) { + u32 aux_stat, bmcr; + + tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); + for (i = 0; i < 2000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && + aux_stat) + break; + } + + tg3_aux_stat_to_speed_duplex(tp, aux_stat, + &current_speed, + &current_duplex); + + bmcr = 0; + for (i = 0; i < 200; i++) { + tg3_readphy(tp, MII_BMCR, &bmcr); + if (tg3_readphy(tp, MII_BMCR, &bmcr)) + continue; + if (bmcr && bmcr != 0x7fff) + break; + udelay(10); + } + + lcl_adv = 0; + rmt_adv = 0; + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if ((bmcr & BMCR_ANENABLE) && + tg3_copper_is_advertising_all(tp, + tp->link_config.advertising)) { + if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, + &rmt_adv)) + current_link_up = 1; + } + } else { + if (!(bmcr & BMCR_ANENABLE) && + tp->link_config.speed == current_speed && + tp->link_config.duplex == current_duplex && + tp->link_config.flowctrl == + tp->link_config.active_flowctrl) { + current_link_up = 1; + } + } + + if (current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + + relink: + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); + + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = 1; + } + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up == 1) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets... 
+ */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_phy_eee_adjust(tp, current_link_up); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. */ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + current_link_up == 1 && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + } + + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 oldlnkctl, newlnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + return 0; + } + + struct tg3_fiber_aneginfo { + int state; + #define ANEG_STATE_UNKNOWN 0 + #define ANEG_STATE_AN_ENABLE 1 + #define ANEG_STATE_RESTART_INIT 2 + #define ANEG_STATE_RESTART 3 + #define ANEG_STATE_DISABLE_LINK_OK 4 + #define ANEG_STATE_ABILITY_DETECT_INIT 5 + #define ANEG_STATE_ABILITY_DETECT 6 + #define ANEG_STATE_ACK_DETECT_INIT 7 + #define ANEG_STATE_ACK_DETECT 8 + #define ANEG_STATE_COMPLETE_ACK_INIT 9 + #define ANEG_STATE_COMPLETE_ACK 10 + #define ANEG_STATE_IDLE_DETECT_INIT 11 + #define ANEG_STATE_IDLE_DETECT 12 + #define ANEG_STATE_LINK_OK 13 + #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 + #define ANEG_STATE_NEXT_PAGE_WAIT 15 + + u32 flags; + #define MR_AN_ENABLE 0x00000001 + #define MR_RESTART_AN 0x00000002 + #define MR_AN_COMPLETE 0x00000004 + #define MR_PAGE_RX 0x00000008 + #define MR_NP_LOADED 0x00000010 + #define MR_TOGGLE_TX 0x00000020 + #define MR_LP_ADV_FULL_DUPLEX 0x00000040 + #define MR_LP_ADV_HALF_DUPLEX 0x00000080 + #define MR_LP_ADV_SYM_PAUSE 0x00000100 + #define MR_LP_ADV_ASYM_PAUSE 0x00000200 + #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 + #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 + #define MR_LP_ADV_NEXT_PAGE 0x00001000 + #define MR_TOGGLE_RX 0x00002000 + #define MR_NP_RX 0x00004000 + + #define MR_LINK_OK 0x80000000 + + unsigned long link_time, cur_time; + + u32 ability_match_cfg; + int ability_match_count; + + char ability_match, idle_match, ack_match; + + u32 txconfig, rxconfig; + #define ANEG_CFG_NP 0x00000080 + #define ANEG_CFG_ACK 0x00000040 + #define ANEG_CFG_RF2 0x00000020 + #define ANEG_CFG_RF1 0x00000010 + #define ANEG_CFG_PS2 0x00000001 + #define ANEG_CFG_PS1 0x00008000 + #define ANEG_CFG_HD 0x00004000 + #define ANEG_CFG_FD 0x00002000 + #define ANEG_CFG_INVAL 0x00001f06 + + }; + #define ANEG_OK 0 + #define ANEG_DONE 1 + #define ANEG_TIMER_ENAB 2 + #define ANEG_FAILED -1 + + #define ANEG_STATE_SETTLE_TIME 10000 + + static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) + { + u16 
flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + int ret; + + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; + + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } + } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 0; + + ap->idle_match = 0; + } else { + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; + } + + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; + + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; + + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; + + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; + + /* fallthru */ + case ANEG_STATE_RESTART: + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; + + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ABILITY_DETECT; + break; + + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ACK_DETECT; + + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + } + break; + + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + 
MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; + + ap->link_time = ap->cur_time; + + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; + + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; + + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; + + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; + + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; + } + + return ret; + } + + static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) + { + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); + + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; + + udelay(1); + } + + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; + + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; + + return res; + } + + static void tg3_init_bcm8002(struct tg3 *tp) + { + u32 mac_status = tr32(MAC_STATUS); + int i; + + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; + + /* Set PLL lock range. 
*/ + tg3_writephy(tp, 0x16, 0x8007); + + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); + + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); + + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); + + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); + + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); + + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); + + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); + + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 15000; i++) + udelay(10); + + /* Deselect the channel register so we can read the PHYID + * later. + */ + tg3_writephy(tp, 0x10, 0x8011); + } + + static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) + { + u16 flowctrl; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + int current_link_up; + + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = 0; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; + + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } + + sg_dig_ctrl = tr32(SG_DIG_CTRL); + + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + } + goto out; + } + + /* Want auto-negotiation. 
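+ * Build the SG_DIG_CTRL value we expect to be in effect: + * hardware autoneg plus the common setup bits, with pause + * capability bits derived from the requested flow control.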
*/ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; + + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } + restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); + + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; + + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = 1; + tp->serdes_counter = 0; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } + } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + + out: + return current_link_up; + } + + static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) + { + int current_link_up = 0; + + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; + + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; + + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + + current_link_up = 1; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } + + mac_status = 
tr32(MAC_STATUS); + if (current_link_up == 0 && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = 1; + } else { + tg3_setup_flow_control(tp, 0, 0); + + /* Forcing 1000FD link up. */ + current_link_up = 1; + + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + out: + return current_link_up; + } + + static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) + { + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + int current_link_up; + int i; + + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + netif_carrier_ok(tp->dev) && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } + } + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); + + /* Enable link change event even when serdes polling. */ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); + + current_link_up = 0; + mac_status = tr32(MAC_STATUS); + + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = 0; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } + + if (current_link_up == 1) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } else { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } + + return 0; + } + + static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) + { + int current_link_up, err = 0; + u32 bmsr, bmcr; + u16 current_speed; + u8 current_duplex; + u32 local_adv, remote_adv; + + tp->mac_mode |= 
MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if (force_reset) + tg3_phy_reset(tp); + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, new_adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + + if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000XHALF; + if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, new_adv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; + } + } else { + u32 new_bmcr; + + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; + + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. 
+ */ + new_bmcr |= BMCR_SPEED1000; + + /* Force a linkdown */ + if (netif_carrier_ok(tp->dev)) { + u32 adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + netif_carrier_off(tp->dev); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + } + + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = 1; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + local_adv = 0; + remote_adv = 0; + + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = 0; + } + } + } + + if (current_link_up == 1 && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else { + netif_carrier_off(tp->dev); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + tg3_link_report(tp); + } + return err; + } + + static void tg3_serdes_parallel_detect(struct tg3 *tp) + { + if (tp->serdes_counter) { + /* Give autoneg time to complete. */ + tp->serdes_counter--; + return; + } + + if (!netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; + + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; + + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. 
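+ * A partner that sends no config code words cannot negotiate, + * so autoneg is turned off and the link is forced to the only + * mode parallel detection can yield here: 1000 full duplex.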
+ */ + + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + /* Config code words received, turn on autoneg. */ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + } + } + } + + static int tg3_setup_phy(struct tg3 *tp, int force_reset) + { + u32 val; + int err; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + u32 scale; + + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + } + + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + + if (!tg3_flag(tp, 5705_PLUS)) { + if (netif_carrier_ok(tp->dev)) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } + + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!netif_carrier_ok(tp->dev)) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } + + return err; + } + + static inline int tg3_irq_sync(struct tg3 *tp) + { + return tp->irq_sync; + } + + static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) + { + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); + } + + static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) + { + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, 
RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); + } + + static void tg3_dump_state(struct tg3 *tp) + { + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } + } + + /* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. + */ + static void tg3_tx_recover(struct tg3 *tp) + { + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); + + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. 
" + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); + + spin_lock(&tp->lock); + tg3_flag_set(tp, TX_RECOVERY_PENDING); + spin_unlock(&tp->lock); + } + + static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) + { + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); + } + + /* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. + */ + static void tg3_tx(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + + if (tg3_flag(tp, ENABLE_TSS)) + index--; + + txq = netdev_get_tx_queue(tp->dev, index); + + while (sw_idx != hw_idx) { + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct sk_buff *skb = ri->skb; + int i, tx_bug = 0; + + if (unlikely(skb == NULL)) { + tg3_tx_recover(tp); + return; + } + + pci_unmap_single(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + ri->skb = NULL; + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + ri = &tnapi->tx_buffers[sw_idx]; + if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) + tx_bug = 1; + + pci_unmap_page(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + } + + dev_kfree_skb(skb); + + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); + return; + } + } + + tnapi->tx_cons = sw_idx; + + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + } + + static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) + { + if (!ri->skb) + return; + + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; + } + + /* Returns size of skb allocated or < 0 on error. + * + * We only need to fill in the address because the other members + * of the RX descriptor are invariant, see tg3_init_rings. + * + * Note the purposeful assymetry of cpu vs. chip accesses. For + * posting buffers we only dirty the first cache line of the RX + * descriptor (containing the address). Whereas for the RX status + * buffers the cpu only reads the last cacheline of the RX descriptor + * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
+ */ + static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) + { + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + struct sk_buff *skb; + dma_addr_t mapping; + int skb_size, dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + skb_size = tp->rx_pkt_map_sz; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; + break; + + default: + return -EINVAL; + } + + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. + */ + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); + if (skb == NULL) + return -ENOMEM; + + skb_reserve(skb, TG3_RX_OFFSET(tp)); + + mapping = pci_map_single(tp->pdev, skb->data, skb_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } + + map->skb = skb; + dma_unmap_addr_set(map, mapping, mapping); + + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); + + return skb_size; + } + + /* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_skb for full details. + */ + static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) + { + struct tg3 *tp = tnapi->tp; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; + + default: + return; + } + + dest_map->skb = src_map->skb; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; + + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. + */ + smp_wmb(); + + src_map->skb = NULL; + } + + /* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. 
When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ + static int tg3_rx(struct tg3_napi *tnapi, int budget) + { + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; + + work_mask |= opaque_key; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } + + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; + + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; + + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + *post_ptr); + if (skb_size < 0) + goto drop_it; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); + + /* Ensure that the update to the skb happens + * after the usage of the old DMA mapping. + */ + smp_wmb(); + + ri->skb = NULL; + + skb_put(skb, len); + } else { + struct sk_buff *copy_skb; + + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + + copy_skb = netdev_alloc_skb(tp->dev, len + + TG3_RAW_IP_ALIGN); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_put(copy_skb, len); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. 
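+ * (small packet: the data was copied into copy_skb above, so + * the original skb and its DMA mapping stay in the ring)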
*/ + skb = copy_skb; + } + + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } + + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); + + received++; + budget--; + + next_pkt: + (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } + next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; + + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } + + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); + + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. + */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; + + if (tnapi != &tp->napi[1]) + napi_schedule(&tp->napi[1].napi); + } + + return received; + } + + static void tg3_poll_link(struct tg3 *tp) + { + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); + } + } + } + + static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) + { + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. 
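+ * (this read barrier pairs with the smp_wmb() in tg3_rx() + * that orders the buffer ring writes before the producer + * index updates)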
+ */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; + } + + return err; + } + + static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) + { + struct tg3 *tp = tnapi->tp; + + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; + } + + /* run RX thread, within the bounds set by NAPI. 
+ * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() + */ + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + + for (i = 1; i < tp->irq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); + + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); + + mmiowb(); + + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); + } + + return work_done; + } + + static int tg3_poll_msix(struct napi_struct *napi, int budget) + { + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } + } + + return work_done; + + tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; + } + + static void tg3_process_error(struct tg3 *tp) + { + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + schedule_work(&tp->reset_task); + } + + static int tg3_poll(struct napi_struct *napi, int budget) + { + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. 
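+ * The rmb() below orders the status tag read before the ring
+ * index checks in tg3_has_work().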
+ */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); + break; + } + } + + return work_done; + + tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; + } + + static void tg3_napi_disable(struct tg3 *tp) + { + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); + } + + static void tg3_napi_enable(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); + } + + static void tg3_napi_init(struct tg3 *tp) + { + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); + } + + static void tg3_napi_fini(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); + } + + static inline void tg3_netif_stop(struct tg3 *tp) + { + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); + } + + static inline void tg3_netif_start(struct tg3 *tp) + { + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); + } + + static void tg3_irq_quiesce(struct tg3 *tp) + { + int i; + + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + } + + /* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ + static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) + { + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); + } + + static inline void tg3_full_unlock(struct tg3 *tp) + { + spin_unlock_bh(&tp->lock); + } + + /* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ + static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_HANDLED; + } + + /* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. + */ + static irqreturn_t tg3_msi(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. 
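+ * The interrupt is re-armed from the NAPI poll handler once
+ * the pending work has been serviced.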
+ */ + tw32_mailbox(tnapi->int_mbox, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_RETVAL(1); + } + + static irqreturn_t tg3_interrupt(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } + out: + return IRQ_RETVAL(handled); + } + + static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
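+ * (an interrupt arriving with an unchanged status_tag can then
+ * be reported as unhandled by the check above).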
+ */ + tnapi->last_irq_tag = sblk->status_tag; + + if (tg3_irq_sync(tp)) + goto out; + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + + out: + return IRQ_RETVAL(handled); + } + + /* ISR for interrupt test */ + static irqreturn_t tg3_test_isr(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); + } + return IRQ_RETVAL(0); + } + + static int tg3_init_hw(struct tg3 *, int); + static int tg3_halt(struct tg3 *, int, int); + + /* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. + */ + static int tg3_restart_hw(struct tg3 *tp, int reset_phy) + __releases(tp->lock) + __acquires(tp->lock) + { + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + static void tg3_poll_controller(struct net_device *dev) + { + int i; + struct tg3 *tp = netdev_priv(dev); + + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); + } + #endif + + static void tg3_reset_task(struct work_struct *work) + { + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + unsigned int restart_timer; + + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_full_unlock(tp); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + restart_timer = tg3_flag(tp, RESTART_TIMER); + tg3_flag_clear(tp, RESTART_TIMER); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, 1); + if (err) + goto out; + + tg3_netif_start(tp); + + if (restart_timer) + mod_timer(&tp->timer, jiffies + 1); + + out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + } + + static void tg3_tx_timeout(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); + } + + schedule_work(&tp->reset_task); + } + + /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ + static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) + { + u32 base = (u32) mapping & 0xffffffff; + + return (base > 0xffffdcc0) && (base + len + 8 < base); + } + + /* Test for DMA addresses > 40-bit */ + static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) + { + #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; + #else + return 0; + #endif + } + + static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) + { + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss 
<< TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); + } + + static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) + { + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = 1; + + if (tg3_4g_overflow_test(map, len)) + hwbug = 1; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = 1; + + if (tg3_flag(tp, 4K_FIFO_LIMIT)) { + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > TG3_TX_BD_DMA_MAX) { + u32 frag_len = TG3_TX_BD_DMA_MAX; + len -= TG3_TX_BD_DMA_MAX; + + if (len) { + tnapi->tx_buffers[*entry].fragmented = true; + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += TG3_TX_BD_DMA_MAX / 2; + frag_len = TG3_TX_BD_DMA_MAX / 2; + } + } else + tmp_flag = flags; + + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + break; + } + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; + } + + static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) + { + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i < last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } + } + + /* Workaround 4GB and 40-bit hardware DMA bugs. */ + static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, ++ struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) + { + struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb; ++ struct sk_buff *new_skb, *skb = *pskb; + dma_addr_t new_addr = 0; + int ret = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); + + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } + + if (!new_skb) { + ret = -1; + } else { + /* New SKB is guaranteed to be linear. 
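+ * skb_copy() and skb_copy_expand() both return a linearized
+ * copy, so the single pci_map_single() below covers the whole
+ * frame.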
*/ + new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + dev_kfree_skb(new_skb); + ret = -1; + } else { + base_flags |= TXD_FLAG_END; + + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); + + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, *entry, 0); + dev_kfree_skb(new_skb); + ret = -1; + } + } + } + + dev_kfree_skb(skb); - ++ *pskb = new_skb; + return ret; + } + + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); + + /* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. + */ + static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) + { + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { + netif_stop_queue(tp->dev); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) + return NETDEV_TX_BUSY; + + netif_wake_queue(tp->dev); + } + + segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto tg3_tso_bug_end; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit(nskb, tp->dev); + } while (segs); + + tg3_tso_bug_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; + } + + /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. + */ + static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int last; + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + budget = tg3_tx_avail(tnapi); + + /* We are running in BH disabled context with netif_tx_lock + * and TX reclaim runs via tp->napi.poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! + */ + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = tnapi->tx_prod; + base_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + struct iphdr *iph; + u32 tcp_opt_len, hdr_len; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); + + if (skb_is_gso_v6(skb)) { + hdr_len = skb_headlen(skb) - ETH_HLEN; + } else { + u32 ip_tcp_len; + + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, skb); + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcp_hdr(skb)->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } + } else { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } + } + } + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); + } + + len = skb_headlen(skb); + + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + + would_hit_hwbug = 0; + + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) + would_hit_hwbug = 1; + + /* Now loop through additional data fragments, and queue them. */ + if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; + + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; + + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); + + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (dma_mapping_error(&tp->pdev->dev, mapping)) + goto dma_error; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? TXD_FLAG_END : 0), + tmp_mss, vlan)) + would_hit_hwbug = 1; + } + } + + if (would_hit_hwbug) { + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + + /* If the workaround fails due to memory/mapping + * failure, silently drop this packet. 
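+ * tigon3_dma_hwbug_workaround() consumes the original skb on
+ * both its success and failure paths, so nothing more needs to
+ * be unwound here.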
+ */ + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); - if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget, ++ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto out_unlock; + } + + skb_tx_timestamp(skb); + + /* Packets are ready, update Tx producer idx local and on card. */ + tw32_tx_mbox(tnapi->prodmbox, entry); + + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); + } + + out_unlock: + mmiowb(); + + return NETDEV_TX_OK; + + dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + dev_kfree_skb(skb); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; + return NETDEV_TX_OK; + } + + static void tg3_mac_loopback(struct tg3 *tp, bool enable) + { + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); + + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + tw32(MAC_MODE, tp->mac_mode); + udelay(40); + } + + static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) + { + u32 val, bmcr, mac_mode, ptest = 0; + + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, 0); + + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; + + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; + bmcr |= BMCR_SPEED1000; + } + } + + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; + + tg3_writephy(tp, MII_BMCR, bmcr); + + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); + + udelay(40); + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= 
MAC_MODE_PORT_MODE_MII; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; + } + + static void tg3_set_loopback(struct net_device *dev, u32 features) + { + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, 1); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } + } + + static u32 tg3_fix_features(struct net_device *dev, u32 features) + { + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; + } + + static int tg3_set_features(struct net_device *dev, u32 features) + { + u32 changed = dev->features ^ features; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; + } + + static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, + int new_mtu) + { + dev->mtu = new_mtu; + + if (new_mtu > ETH_DATA_LEN) { + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); + } else { + tg3_flag_set(tp, JUMBO_RING_ENABLE); + } + } else { + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); + } + tg3_flag_clear(tp, JUMBO_RING_ENABLE); + } + } + + static int tg3_change_mtu(struct net_device *dev, int new_mtu) + { + struct tg3 *tp = netdev_priv(dev); + int err; + + if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. 
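+ * The new MTU takes effect when the device is next brought up
+ * and tg3_rx_prodring_alloc() sizes the rx buffers from dev->mtu.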
+ */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; + } + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + tg3_set_mtu(dev, tp, new_mtu); + + err = tg3_restart_hw(tp, 0); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; + } + + static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + /* Initialize rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ + static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. 
Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + for (i = 0; i < tp->rx_jumbo_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); + if (i == 0) + goto initfail; + tp->rx_jumbo_pending = i; + break; + } + } + + done: + return 0; + + initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; + } + + static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; + } + if (tpr->rx_jmb) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } + } + + static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); + if (!tpr->rx_std) + goto err_out; + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); + if (!tpr->rx_jmb) + goto err_out; + } + + return 0; + + err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; + } + + /* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ + static void tg3_free_rings(struct tg3 *tp) + { + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; + + if (!skb) + continue; + + tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } + } + } + + /* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ + static int tg3_init_rings(struct tg3 *tp) + { + int i; + + /* Free up all the SKBs. 
*/ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + if (tnapi->tx_ring) + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + tg3_free_rings(tp); + return -ENOMEM; + } + } + + return 0; + } + + /* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ + static void tg3_free_consistent(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + + if (tnapi->rx_rcb) { + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } + + tg3_rx_prodring_fini(tp, &tnapi->prodring); + + if (tnapi->hw_status) { + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } + } + + if (tp->hw_stats) { + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); + tp->hw_stats = NULL; + } + } + + /* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. + */ + static int tg3_alloc_consistent(struct tg3 *tp) + { + int i; + + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, + GFP_KERNEL); + if (!tp->hw_stats) + goto err_out; + + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + struct tg3_hw_status *sblk; + + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); + if (!tnapi->hw_status) + goto err_out; + + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + sblk = tnapi->hw_status; + + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if ((!i && !tg3_flag(tp, ENABLE_TSS)) || + (i && tg3_flag(tp, ENABLE_TSS))) { + tnapi->tx_buffers = kzalloc( + sizeof(struct tg3_tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); + if (!tnapi->tx_ring) + goto err_out; + } + + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. + */ + switch (i) { + default: + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + break; + case 2: + tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; + break; + case 3: + tnapi->rx_rcb_prod_idx = &sblk->reserved; + break; + case 4: + tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; + break; + } + + /* + * If multivector RSS is enabled, vector 0 does not handle + * rx or tx interrupts. 
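+ * Vector 0 still services link changes and error events through
+ * tg3_poll().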
Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } + + return 0; + + err_out: + tg3_free_consistent(tp); + return -ENOMEM; + } + + #define MAX_WAIT_CNT 1000 + + /* To stop a block, clear the enable bit and poll till it + * clears. tp->lock is held. + */ + static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) + { + unsigned int i; + u32 val; + + if (tg3_flag(tp, 5705_PLUS)) { + switch (ofs) { + case RCVLSC_MODE: + case DMAC_MODE: + case MBFREE_MODE: + case BUFMGR_MODE: + case MEMARB_MODE: + /* We can't enable/disable these bits of the + * 5705/5750, just say success. + */ + return 0; + + default: + break; + } + } + + val = tr32(ofs); + val &= ~enable_bit; + tw32_f(ofs, val); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + val = tr32(ofs); + if ((val & enable_bit) == 0) + break; + } + + if (i == MAX_WAIT_CNT && !silent) { + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + return 0; + } + + /* tp->lock is held. */ + static int tg3_abort_hw(struct tg3 *tp, int silent) + { + int i, err; + + tg3_disable_ints(tp); + + tp->rx_mode &= ~RX_MODE_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); + + err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); + + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) + break; + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; + } + + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); + + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); + + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + if (tp->hw_stats) + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + return err; + } + + /* Save PCI 
command register before chip reset */ + static void tg3_save_pci_state(struct tg3 *tp) + { + pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); + } + + /* Restore PCI state after chip reset */ + static void tg3_restore_pci_state(struct tg3 *tp) + { + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. */ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tg3_flag(tp, ENABLE_APE)) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tg3_flag(tp, PCI_EXPRESS)) + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + } + + /* Make sure PCI-X relaxed ordering bit is clear. */ + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tg3_flag(tp, 5780_CLASS)) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tg3_flag(tp, USING_MSI)) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } + } + + /* tp->lock is held. */ + static int tg3_chip_reset(struct tg3 *tp) + { + u32 val; + void (*write_op)(struct tg3 *, u32, u32); + int i, err; + + tg3_nvram_lock(tp); + + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + + /* No matching tg3_nvram_unlock() after this because + * chip reset below will undo the nvram lock. + */ + tp->nvram_lock_cnt = 0; + + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 5755_PLUS)) + tw32(GRC_FASTBOOT_PC, 0); + + /* + * We must avoid the readl() that normally takes place. + * It locks machines, causes machine checks, and other + * fun things. So, temporarily disable the 5701 + * hardware workaround, while we do the reset. + */ + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; + + /* Prevent the irq handler from reading or writing PCI registers + * during chip reset when the memory enable bit in the PCI command + * register may be cleared. The chip does not generate interrupt + * at this time, but the irq handler may still be called due to irq + * sharing or irqpoll. 
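+ * While CHIP_RESETTING is set, tg3_interrupt() and
+ * tg3_interrupt_tagged() report such interrupts as not ours and
+ * return without any register access.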
+ */ + tg3_flag_set(tp, CHIP_RESETTING); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + } + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + /* do the reset */ + val = GRC_MISC_CFG_CORECLK_RESET; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Force PCIe 1.0a mode */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + tw32(GRC_MISC_CFG, (1 << 29)); + val |= (1 << 29); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + + /* Manage gphy power for all CPMU absent PCIe devices. */ + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) + val |= GRC_MISC_CFG_KEEP_GPHY_POWER; + + tw32(GRC_MISC_CFG, val); + + /* restore 5701 hardware bug workaround write method */ + tp->write32 = write_op; + + /* Unfortunately, we have to delay before the PCI read back. + * Some 575X chips even will not respond to a PCI cfg access + * when the reset command is given to the chip. + * + * How do these hardware designers expect things to work + * properly if the PCI write is posted for a long period + * of time? It is always necessary to have some method by + * which a register read back can occur to push the write + * out which does the reset. + * + * For most tg3 variants the trick below was working. + * Ho hum... + */ + udelay(120); + + /* Flush PCI posted writes. The normal MMIO registers + * are inaccessible at this time so this is the only + * way to make this reliably (actually, this is no longer + * the case, see above). I tried to use indirect + * register read/write but this upset some 5701 variants. + */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); + + udelay(120); + + if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { + u16 val16; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { + int i; + u32 cfg_val; + + /* Wait for link training to complete. */ + for (i = 0; i < 5000; i++) + udelay(100); + + pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); + pci_write_config_dword(tp->pdev, 0xc4, + cfg_val | (1 << 15)); + } + + /* Clear the "no snoop" and "relaxed ordering" bits. */ + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + &val16); + val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + /* + * Older PCIe devices only support the 128 byte + * MPS setting. Enforce the restriction. 
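+ * (a zero PCI_EXP_DEVCTL_PAYLOAD field selects the 128 byte
+ * encoding).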
+ */ + if (!tg3_flag(tp, CPMU_PRESENT)) + val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + val16); + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + /* Clear error status */ + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); + } + + tg3_restore_pci_state(tp); + + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); + + val = 0; + if (tg3_flag(tp, 5780_CLASS)) + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { + tg3_stop_fw(tp); + tw32(0x5000, 0x400); + } + + tw32(GRC_MODE, tp->grc_mode); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { + val = tr32(0xc4); + + tw32(0xc4, val | (1 << 15)); + } + + if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) + tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; + tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; + } else + val = 0; + + tw32_f(MAC_MODE, val); + udelay(40); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + + err = tg3_poll_fw(tp); + if (err) + return err; + + tg3_mdio_start(tp); + + if (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + val = tr32(0x7c00); + + tw32(0x7c00, val | (1 << 25)); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + tp->last_event_jiffies = jiffies; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + } + + return 0; + } + + /* tp->lock is held. */ + static int tg3_halt(struct tg3 *tp, int kind, int silent) + { + int err; + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, kind); + + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); + + __tg3_set_mac_addr(tp, 0); + + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); + + if (err) + return err; + + return 0; + } + + static int tg3_set_mac_addr(struct net_device *dev, void *p) + { + struct tg3 *tp = netdev_priv(dev); + struct sockaddr *addr = p; + int err = 0, skip_mac_1 = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (!netif_running(dev)) + return 0; + + if (tg3_flag(tp, ENABLE_ASF)) { + u32 addr0_high, addr0_low, addr1_high, addr1_low; + + addr0_high = tr32(MAC_ADDR_0_HIGH); + addr0_low = tr32(MAC_ADDR_0_LOW); + addr1_high = tr32(MAC_ADDR_1_HIGH); + addr1_low = tr32(MAC_ADDR_1_LOW); + + /* Skip MAC addr 1 if ASF is using it. 
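+ * i.e. when MAC address 1 is non-zero and no longer mirrors
+ * MAC address 0.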
*/ + if ((addr0_high != addr1_high || addr0_low != addr1_low) && + !(addr1_high == 0 && addr1_low == 0)) + skip_mac_1 = 1; + } + spin_lock_bh(&tp->lock); + __tg3_set_mac_addr(tp, skip_mac_1); + spin_unlock_bh(&tp->lock); + + return err; + } + + /* tp->lock is held. */ + static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags, + u32 nic_addr) + { + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), + ((u64) mapping >> 32)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), + ((u64) mapping & 0xffffffff)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), + maxlen_flags); + + if (!tg3_flag(tp, 5705_PLUS)) + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_NIC_ADDR), + nic_addr); + } + + static void __tg3_set_rx_mode(struct net_device *); + static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) + { + int i; + + if (!tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, ENABLE_RSS)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } + + for (i = 0; i < tp->irq_cnt - 1; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + + if (tg3_flag(tp, ENABLE_TSS)) { + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + + if (tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } + } + } + + /* tp->lock is held. */ + static void tg3_rings_reset(struct tg3 *tp) + { + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* Disable all transmit rings but the first. 
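+ * Each unused ring's BDINFO block in NIC SRAM is flagged
+ * BDINFO_FLAGS_DISABLED below.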
*/ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + + /* Disable all receive return rings but the first. */ + if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; + + /* Zero mailbox registers. */ + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + if (tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + tp->napi[i].chk_msi_cnt = 0; + tp->napi[i].last_rx_cons = 0; + tp->napi[i].last_tx_cons = 0; + } + if (!tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[0].prodmbox, 0); + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!tg3_flag(tp, 5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + txrcb = NIC_SRAM_SEND_RCB; + rxrcb = NIC_SRAM_RCV_RET_RCB; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + if (tnapi->rx_rcb) { + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + rxrcb += TG3_BDINFO_SIZE; + } + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + + /* Clear status block in ram. 
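+ * (one status block per vector; its DMA address was programmed
+ * just above).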
*/ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + ((tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + rxrcb += TG3_BDINFO_SIZE; + } + } + + static void tg3_setup_rxbd_thresholds(struct tg3 *tp) + { + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); + } + + /* tp->lock is held. */ + static int tg3_reset_hw(struct tg3 *tp, int reset_phy) + { + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tg3_flag(tp, INIT_COMPLETE)) + tg3_abort_hw(tp, 1); + + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, val); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + TG3_CPMU_DBTMR1_LNKIDLE_2047US); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + + if (reset_phy) + tg3_phy_reset(tp); + + err = tg3_chip_reset(tp); + if (err) + return err; + + tg3_write_sig_legacy(tp, RESET_KIND_INIT); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + + val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); + val &= ~CPMU_LNK_AWARE_MACCLK_MASK; + val |= CPMU_LNK_AWARE_MACCLK_6_25; + tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); + + val = tr32(TG3_CPMU_HST_ACC); + val 
&= ~CPMU_HST_ACC_MACCLK_MASK; + val |= CPMU_HST_ACC_MACCLK_6_25; + tw32(TG3_CPMU_HST_ACC, val); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + if (tg3_flag(tp, L1PLLPD_EN)) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, + val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + } + + /* This works around an issue with Athlon chipsets on + * B3 tigon3 silicon. This bit has no effect on any + * other revision. But do not set this on PCI Express + * chips and don't even touch the clocks if the CPMU is present. + */ + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) { + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_RETRY_SAME_DMA; + tw32(TG3PCI_PCISTATE, val); + } + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + tw32(TG3PCI_PCISTATE, val); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { + /* Enable some hw fixes. */ + val = tr32(TG3PCI_MSI_DATA); + val |= (1 << 26) | (1 << 28) | (1 << 29); + tw32(TG3PCI_MSI_DATA, val); + } + + /* Descriptor ring init may make accesses to the + * NIC SRAM area to setup the TX descriptors, so we + * can only do this after the hardware has been + * successfully reset. 
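+ * (tg3_chip_reset() has completed above; tg3_init_rings() below
+ * rebuilds the rings that the reset invalidated.)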
+ */
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3PCI_DMA_RW_CTRL) &
+ ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= DMA_RWCTRL_TAGGED_STAT_WA;
+ tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ /* This value is determined during the probe time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+ GRC_MODE_4X_NIC_SEND_RINGS |
+ GRC_MODE_NO_TX_PHDR_CSUM |
+ GRC_MODE_NO_RX_PHDR_CSUM);
+ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+ /* Pseudo-header checksum is done by hardware logic and not
+ * the offload processors, so make the chip do the pseudo-
+ * header checksums on receive. For transmit it is more
+ * convenient to do the pseudo-header checksum in software
+ * as Linux does that on transmit for us in all cases.
+ */
+ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+ tw32(GRC_MODE,
+ tp->grc_mode |
+ (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+
+ /* Setup the timer prescaler register. Clock is always 66MHz. */
+ val = tr32(GRC_MISC_CFG);
+ val &= ~0xff;
+ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+
+ /* Initialize MBUF/DESC pool. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ /* Do nothing. */
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+ else
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+ } else if (tg3_flag(tp, TSO_CAPABLE)) {
+ int fw_len;
+
+ fw_len = tp->fw_len;
+ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+ tw32(BUFMGR_MB_POOL_ADDR,
+ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+ tw32(BUFMGR_MB_POOL_SIZE,
+ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+ }
+
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water);
+ } else {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water_jumbo);
+ }
+ tw32(BUFMGR_DMA_LOW_WATER,
+ tp->bufmgr_config.dma_low_water);
+ tw32(BUFMGR_DMA_HIGH_WATER,
+ tp->bufmgr_config.dma_high_water);
+
+ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
+ tw32(BUFMGR_MODE, val);
+ for (i = 0; i < 2000; i++) {
+ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+ break;
+ udelay(10);
+ }
+ if (i >= 2000) {
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
+ return -ENODEV;
+ }
+
+ if
(tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + tg3_setup_rxbd_thresholds(tp); + + /* Initialize TG3_BDINFO's at: + * RCVDBDI_STD_BD: standard eth size rx ring + * RCVDBDI_JUMBO_BD: jumbo frame rx ring + * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) + * + * like so: + * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring + * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | + * ring attribute flags + * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM + * + * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. + * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. + * + * The size of each ring is fixed in the firmware, but the location is + * configurable. + */ + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_std_mapping >> 32)); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!tg3_flag(tp, 5717_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + + /* Disable the mini ring */ + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_jmb_mapping >> 32)); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); + } else { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + } + + if (tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = TG3_RX_STD_MAX_SIZE_5700; + else + val = TG3_RX_STD_MAX_SIZE_5717; + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; + + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); + + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + + tg3_rings_reset(tp); + + /* Initialize MAC address and backoff seed. */ + __tg3_set_mac_addr(tp, 0); + + /* MTU + ethernet header + FCS + optional VLAN tag */ + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* The slot time is changed by tg3_setup_phy if we + * run at gigabit with half duplex. + */ + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); + + /* Receive rules. 
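+ * Frames that match none of the programmed rules fall into the
+ * default class configured here.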
*/ + tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); + tw32(RCVLPC_CONFIG, 0x0181); + + /* Calculate RDMAC_MODE setting early, we need it to determine + * the RCVLPC_STATE_ENABLE mask. + */ + rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | + RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | + RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | + RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | + RDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + } + } + + if (tg3_flag(tp, PCI_EXPRESS)) + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if (tg3_flag(tp, 57765_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3_RDMA_RSRVCTRL_REG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(TG3_RDMA_RSRVCTRL_REG, + val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); + } + + /* Receive/send statistics. */ + if (tg3_flag(tp, 5750_PLUS)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + tg3_flag(tp, TSO_CAPABLE)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else { + tw32(RCVLPC_STATS_ENABLE, 0xffffff); + } + tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); + tw32(SNDDATAI_STATSENAB, 0xffffff); + tw32(SNDDATAI_STATSCTRL, + (SNDDATAI_SCTRL_ENABLE | + SNDDATAI_SCTRL_FASTUPD)); + + /* Setup host coalescing engine. 
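+ * The engine has to be idle while it is reprogrammed, so disable
+ * it and poll until HOSTCC_MODE_ENABLE reads back clear.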
*/ + tw32(HOSTCC_MODE, 0); + for (i = 0; i < 2000; i++) { + if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) + break; + udelay(10); + } + + __tg3_set_coalesce(tp, &tp->coal); + + if (!tg3_flag(tp, 5705_PLUS)) { + /* Status/statistics block address. See tg3_timer, + * the tg3_periodic_fetch_stats call there, and + * tg3_get_stats to see how this works for 5705/5750 chips. + */ + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tp->stats_mapping >> 32)); + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tp->stats_mapping & 0xffffffff)); + tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } + } + + tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); + + tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); + tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); + udelay(40); + + /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). + * If TG3_FLAG_IS_NIC is zero, we should read the + * register to preserve the GPIO settings for LOMs. The GPIOs, + * whether used as inputs or outputs, are set by boot code after + * reset. 
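+ * The code below therefore merges the GPIO bits currently in
+ * GRC_LOCAL_CTRL into tp->grc_local_ctrl rather than overwriting them.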
+ */ + if (!tg3_flag(tp, IS_NIC)) { + u32 gpio_mask; + + gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | + GRC_LCLCTRL_GPIO_OUTPUT3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + + tp->grc_local_ctrl &= ~gpio_mask; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; + + /* GPIO1 must be driven high for eeprom write protect */ + if (tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + } + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(100); + + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); + udelay(40); + } + + val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | + WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | + WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | + WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | + WDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || + tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { + /* nothing */ + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + val |= WDMAC_MODE_RX_ACCEL; + } + } + + /* Enable host coalescing bug fix */ + if (tg3_flag(tp, 5755_PLUS)) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; + + tw32_f(WDMAC_MODE, val); + udelay(40); + + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; + } + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + tw32_f(RDMAC_MODE, rdmac_mode); + udelay(40); + + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); + tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tg3_flag(tp, ENABLE_TSS)) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); + tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | 
SNDBDS_MODE_ATTN_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + err = tg3_load_5701_a0_firmware_fix(tp); + if (err) + return err; + } + + if (tg3_flag(tp, TSO_CAPABLE)) { + err = tg3_load_tso_firmware(tp); + if (err) + return err; + } + + tp->tx_mode = TX_MODE_ENABLE; + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + + tw32_f(MAC_TX_MODE, tp->tx_mode); + udelay(100); + + if (tg3_flag(tp, ENABLE_RSS)) { + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + if (tp->irq_cnt == 2) { + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { + tw32(reg, 0x0); + reg += 4; + } + } else { + u32 val; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + val = i % (tp->irq_cnt - 1); + i++; + for (; i % 8; i++) { + val <<= 4; + val |= (i % (tp->irq_cnt - 1)); + } + tw32(reg, val); + reg += 4; + } + } + + /* Setup the "secret" hash key. */ + tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); + tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); + tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); + tw32(MAC_RSS_HASH_KEY_3, 0x36621985); + tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); + tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); + tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); + tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); + tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); + tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + } + + tp->rx_mode = RX_MODE_ENABLE; + if (tg3_flag(tp, 5755_PLUS)) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + + if (tg3_flag(tp, ENABLE_RSS)) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + tw32(MAC_LED_CTRL, tp->led_ctrl); + + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + /* Set drive transmission level to 1.2V */ + /* only if the signal pre-emphasis bit is not set */ + val = tr32(MAC_SERDES_CFG); + val &= 0xfffff000; + val |= 0x880; + tw32(MAC_SERDES_CFG, val); + } + if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) + tw32(MAC_SERDES_CFG, 0x616000); + } + + /* Prevent chip from dropping frames when flow control + * is enabled. 
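+ * (57765 parts get a MAC RX low-water mark of one frame, all other
+ * chips two.)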
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = 1; + else + val = 2; + tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + /* Use hardware link auto-negotiation */ + tg3_flag_set(tp, HW_AUTONEG); + } + + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.speed = tp->link_config.orig_speed; + tp->link_config.duplex = tp->link_config.orig_duplex; + tp->link_config.autoneg = tp->link_config.orig_autoneg; + } + + err = tg3_setup_phy(tp, 0); + if (err) + return err; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. */ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); + } + } + } + + __tg3_set_rx_mode(tp->dev); + + /* Initialize receive rules. */ + tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); + + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) + limit = 8; + else + limit = 16; + if (tg3_flag(tp, ENABLE_ASF)) + limit -= 4; + switch (limit) { + case 16: + tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + case 15: + tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + case 14: + tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + case 13: + tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + case 12: + tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + case 11: + tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + case 10: + tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + case 9: + tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + case 8: + tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + case 7: + tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + case 6: + tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + case 5: + tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + case 4: + /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ + case 3: + /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ + case 2: + case 1: + + default: + break; + } + + if (tg3_flag(tp, ENABLE_APE)) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); + + return 0; + } + + /* Called at device open time to get the chip ready for + * packet processing. Invoked with tp->lock held. 
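+ * tg3_init_hw() only switches clocks and resets the memory window
+ * base before delegating the real work to tg3_reset_hw().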
+ */ + static int tg3_init_hw(struct tg3 *tp, int reset_phy) + { + tg3_switch_clocks(tp); + + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + return tg3_reset_hw(tp, reset_phy); + } + + #define TG3_STAT_ADD32(PSTAT, REG) \ + do { u32 __val = tr32(REG); \ + (PSTAT)->low += __val; \ + if ((PSTAT)->low < __val) \ + (PSTAT)->high += 1; \ + } while (0) + + static void tg3_periodic_fetch_stats(struct tg3 *tp) + { + struct tg3_hw_stats *sp = tp->hw_stats; + + if (!netif_carrier_ok(tp->dev)) + return; + + TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); + TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); + TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); + TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); + TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); + TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); + TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); + TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); + TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); + + TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); + TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); + TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); + TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); + TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); + TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); + TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); + TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); + TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); + TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); + + TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); + } + + static void tg3_chk_missed_msi(struct tg3 *tp) + { + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tg3_msi(0, tnapi); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } + } + + static void tg3_timer(unsigned long __opaque) + { + struct tg3 *tp = (struct tg3 *) __opaque; + + if (tp->irq_sync) + goto restart_timer; + + spin_lock(&tp->lock); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_chk_missed_msi(tp); + + if (!tg3_flag(tp, TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); + } + + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tg3_flag_set(tp, RESTART_TIMER); + spin_unlock(&tp->lock); + schedule_work(&tp->reset_task); + return; + } + } + + /* This part only runs once per second. */ + if (!--tp->timer_counter) { + if (tg3_flag(tp, 5705_PLUS)) + tg3_periodic_fetch_stats(tp); + + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + u32 mac_stat; + int phy_event; + + mac_stat = tr32(MAC_STATUS); + + phy_event = 0; + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { + if (mac_stat & MAC_STATUS_MI_INTERRUPT) + phy_event = 1; + } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) + phy_event = 1; + + if (phy_event) + tg3_setup_phy(tp, 0); + } else if (tg3_flag(tp, POLL_SERDES)) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (!netif_carrier_ok(tp->dev) && + (mac_stat & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET))) { + need_setup = 1; + } + if (need_setup) { + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + tg3_setup_phy(tp, 0); + } + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tg3_serdes_parallel_detect(tp); + } + + tp->timer_counter = tp->timer_multiplier; + } + + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. 
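+ * In both cases the heartbeat simply arrives late and the firmware
+ * wrongly concludes that the host has died.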
+ * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. + */ + if (!--tp->asf_counter) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, + FWCMD_NICDRV_ALIVE3); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); + + tg3_generate_fw_event(tp); + } + tp->asf_counter = tp->asf_multiplier; + } + + spin_unlock(&tp->lock); + + restart_timer: + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + } + + static int tg3_request_irq(struct tg3 *tp, int irq_num) + { + irq_handler_t fn; + unsigned long flags; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; + + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + fn = tg3_msi; + if (tg3_flag(tp, 1SHOT_MSI)) + fn = tg3_msi_1shot; + flags = 0; + } else { + fn = tg3_interrupt; + if (tg3_flag(tp, TAGGED_STATUS)) + fn = tg3_interrupt_tagged; + flags = IRQF_SHARED; + } + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); + } + + static int tg3_test_interrupt(struct tg3 *tp) + { + struct tg3_napi *tnapi = &tp->napi[0]; + struct net_device *dev = tp->dev; + int err, i, intr_ok = 0; + u32 val; + + if (!netif_running(dev)) + return -ENODEV; + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. + */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); + if (err) + return err; + + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; + tg3_enable_ints(tp); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + tnapi->coal_now); + + for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + + int_mbox = tr32_mailbox(tnapi->int_mbox); + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; + break; + } + + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + msleep(10); + } + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + err = tg3_request_irq(tp, 0); + + if (err) + return err; + + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + return 0; + } + + return -EIO; + } + + /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is + * successfully restored + */ + static int tg3_test_msi(struct tg3 *tp) + { + int err; + u16 pci_cmd; + + if (!tg3_flag(tp, USING_MSI)) + return 0; + + /* Turn off SERR reporting in case MSI terminates with Master + * Abort. 
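+ * The saved PCI_COMMAND word is restored once the interrupt test
+ * below has run.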
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(tp->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = tg3_test_interrupt(tp);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ if (!err)
+ return 0;
+
+ /* other failures */
+ if (err != -EIO)
+ return err;
+
+ /* MSI test failed, go back to INTx mode */
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
+
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ pci_disable_msi(tp->pdev);
+
+ tg3_flag_clear(tp, USING_MSI);
+ tp->napi[0].irq_vec = tp->pdev->irq;
+
+ err = tg3_request_irq(tp, 0);
+ if (err)
+ return err;
+
+ /* Need to reset the chip because the MSI cycle may have terminated
+ * with Master Abort.
+ */
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_init_hw(tp, 1);
+
+ tg3_full_unlock(tp);
+
+ if (err)
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ return err;
+ }
+
+ static int tg3_request_firmware(struct tg3 *tp)
+ {
+ const __be32 *fw_data;
+
+ if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
+ return -ENOENT;
+ }
+
+ fw_data = (void *)tp->fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ * start address and _full_ length including BSS sections
+ * (which must be longer than the actual data, of course).
+ */
+
+ tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - 12)) {
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+ return -EINVAL;
+ }
+
+ /* We no longer need firmware; we have it. */
+ tp->fw_needed = NULL;
+ return 0;
+ }
+
+ static bool tg3_enable_msix(struct tg3 *tp)
+ {
+ int i, rc, cpus = num_online_cpus();
+ struct msix_entry msix_ent[tp->irq_max];
+
+ if (cpus == 1)
+ /* Just fall back to the simpler MSI mode. */
+ return false;
+
+ /*
+ * We want as many rx rings enabled as there are cpus.
+ * The first MSIX vector only deals with link interrupts, etc,
+ * so we add one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+
+ for (i = 0; i < tp->irq_max; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ if (rc < 0) {
+ return false;
+ } else if (rc != 0) {
+ if (pci_enable_msix(tp->pdev, msix_ent, rc))
+ return false;
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
+ tp->irq_cnt = rc;
+ }
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].irq_vec = msix_ent[i].vector;
+
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
+ if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ pci_disable_msix(tp->pdev);
+ return false;
+ }
+
+ if (tp->irq_cnt > 1) {
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_flag_set(tp, ENABLE_TSS);
+ netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+ }
+ }
+
+ return true;
+ }
+
+ static void tg3_ints_init(struct tg3 *tp)
+ {
+ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+ !tg3_flag(tp, TAGGED_STATUS)) {
+ /* All MSI supporting chips should support tagged
+ * status. Assert that this is the case.
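+ * Rather than a hard assertion we just warn and fall back to the
+ * legacy INTx setup below.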
+ */ + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? Not using MSI\n"); + goto defcfg; + } + + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } + defcfg: + if (!tg3_flag(tp, USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); + } + } + + static void tg3_ints_fini(struct tg3 *tp) + { + if (tg3_flag(tp, USING_MSIX)) + pci_disable_msix(tp->pdev); + else if (tg3_flag(tp, USING_MSI)) + pci_disable_msi(tp->pdev); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); + } + + static int tg3_open(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + int i, err; + + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + netdev_warn(tp->dev, "TSO capability disabled\n"); + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { + netdev_notice(tp->dev, "TSO capability restored\n"); + tg3_flag_set(tp, TSO_CAPABLE); + } + } + + netif_carrier_off(tp->dev); + + err = tg3_power_up(tp); + if (err) + return err; + + tg3_full_lock(tp, 0); + + tg3_disable_ints(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + + /* The placement of this call is tied + * to the setup and use of Host TX descriptors. 
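+ * (tg3_alloc_consistent() sizes its per-vector allocations from
+ * tp->irq_cnt, which tg3_ints_init() above has just settled.)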
+ */ + err = tg3_alloc_consistent(tp); + if (err) + goto err_out1; + + tg3_napi_init(tp); + + tg3_napi_enable(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; + } + } + + if (err) + goto err_out2; + + tg3_full_lock(tp, 0); + + err = tg3_init_hw(tp, 1); + if (err) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + } else { + if (tg3_flag(tp, TAGGED_STATUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + tp->timer_counter = tp->timer_multiplier = + (HZ / tp->timer_offset); + tp->asf_counter = tp->asf_multiplier = + ((HZ / tp->timer_offset) * 2); + + init_timer(&tp->timer); + tp->timer.expires = jiffies + tp->timer_offset; + tp->timer.data = (unsigned long) tp; + tp->timer.function = tg3_timer; + } + + tg3_full_unlock(tp); + + if (err) + goto err_out3; + + if (tg3_flag(tp, USING_MSI)) { + err = tg3_test_msi(tp); + + if (err) { + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_full_unlock(tp); + + goto err_out2; + } + + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); + + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); + } + } + + tg3_phy_start(tp); + + tg3_full_lock(tp, 0); + + add_timer(&tp->timer); + tg3_flag_set(tp, INIT_COMPLETE); + tg3_enable_ints(tp); + + tg3_full_unlock(tp); + + netif_tx_start_all_queues(dev); + + /* + * Reset loopback feature if it was turned on while the device was down + * make sure that it's installed properly now. 
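+ * (The NETIF_F_LOOPBACK flag survives the down/up cycle but the
+ * hardware state behind it does not.)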
+ */ + if (dev->features & NETIF_F_LOOPBACK) + tg3_set_loopback(dev, dev->features); + + return 0; + + err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + err_out2: + tg3_napi_disable(tp); + tg3_napi_fini(tp); + tg3_free_consistent(tp); + + err_out1: + tg3_ints_fini(tp); + tg3_frob_aux_power(tp, false); + pci_set_power_state(tp->pdev, PCI_D3hot); + return err; + } + + static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); + static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); + + static int tg3_close(struct net_device *dev) + { + int i; + struct tg3 *tp = netdev_priv(dev); + + tg3_napi_disable(tp); + cancel_work_sync(&tp->reset_task); + + netif_tx_stop_all_queues(dev); + + del_timer_sync(&tp->timer); + + tg3_phy_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_disable_ints(tp); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + tg3_ints_fini(tp); + + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + + memcpy(&tp->estats_prev, tg3_get_estats(tp), + sizeof(tp->estats_prev)); + + tg3_napi_fini(tp); + + tg3_free_consistent(tp); + + tg3_power_down(tp); + + netif_carrier_off(tp->dev); + + return 0; + } + + static inline u64 get_stat64(tg3_stat64_t *val) + { + return ((u64)val->high << 32) | ((u64)val->low); + } + + static u64 calc_crc_errors(struct tg3 *tp) + { + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 val; + + spin_lock_bh(&tp->lock); + if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { + tg3_writephy(tp, MII_TG3_TEST1, + val | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); + } else + val = 0; + spin_unlock_bh(&tp->lock); + + tp->phy_crc_errors += val; + + return tp->phy_crc_errors; + } + + return get_stat64(&hw_stats->rx_fcs_errors); + } + + #define ESTAT_ADD(member) \ + estats->member = old_estats->member + \ + get_stat64(&hw_stats->member) + + static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) + { + struct tg3_ethtool_stats *estats = &tp->estats; + struct tg3_ethtool_stats *old_estats = &tp->estats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_estats; + + ESTAT_ADD(rx_octets); + ESTAT_ADD(rx_fragments); + ESTAT_ADD(rx_ucast_packets); + ESTAT_ADD(rx_mcast_packets); + ESTAT_ADD(rx_bcast_packets); + ESTAT_ADD(rx_fcs_errors); + ESTAT_ADD(rx_align_errors); + ESTAT_ADD(rx_xon_pause_rcvd); + ESTAT_ADD(rx_xoff_pause_rcvd); + ESTAT_ADD(rx_mac_ctrl_rcvd); + ESTAT_ADD(rx_xoff_entered); + ESTAT_ADD(rx_frame_too_long_errors); + ESTAT_ADD(rx_jabbers); + ESTAT_ADD(rx_undersize_packets); + ESTAT_ADD(rx_in_length_errors); + ESTAT_ADD(rx_out_length_errors); + ESTAT_ADD(rx_64_or_less_octet_packets); + ESTAT_ADD(rx_65_to_127_octet_packets); + ESTAT_ADD(rx_128_to_255_octet_packets); + ESTAT_ADD(rx_256_to_511_octet_packets); + ESTAT_ADD(rx_512_to_1023_octet_packets); + ESTAT_ADD(rx_1024_to_1522_octet_packets); + ESTAT_ADD(rx_1523_to_2047_octet_packets); + ESTAT_ADD(rx_2048_to_4095_octet_packets); + ESTAT_ADD(rx_4096_to_8191_octet_packets); + ESTAT_ADD(rx_8192_to_9022_octet_packets); + + ESTAT_ADD(tx_octets); + ESTAT_ADD(tx_collisions); + 
ESTAT_ADD(tx_xon_sent); + ESTAT_ADD(tx_xoff_sent); + ESTAT_ADD(tx_flow_control); + ESTAT_ADD(tx_mac_errors); + ESTAT_ADD(tx_single_collisions); + ESTAT_ADD(tx_mult_collisions); + ESTAT_ADD(tx_deferred); + ESTAT_ADD(tx_excessive_collisions); + ESTAT_ADD(tx_late_collisions); + ESTAT_ADD(tx_collide_2times); + ESTAT_ADD(tx_collide_3times); + ESTAT_ADD(tx_collide_4times); + ESTAT_ADD(tx_collide_5times); + ESTAT_ADD(tx_collide_6times); + ESTAT_ADD(tx_collide_7times); + ESTAT_ADD(tx_collide_8times); + ESTAT_ADD(tx_collide_9times); + ESTAT_ADD(tx_collide_10times); + ESTAT_ADD(tx_collide_11times); + ESTAT_ADD(tx_collide_12times); + ESTAT_ADD(tx_collide_13times); + ESTAT_ADD(tx_collide_14times); + ESTAT_ADD(tx_collide_15times); + ESTAT_ADD(tx_ucast_packets); + ESTAT_ADD(tx_mcast_packets); + ESTAT_ADD(tx_bcast_packets); + ESTAT_ADD(tx_carrier_sense_errors); + ESTAT_ADD(tx_discards); + ESTAT_ADD(tx_errors); + + ESTAT_ADD(dma_writeq_full); + ESTAT_ADD(dma_write_prioq_full); + ESTAT_ADD(rxbds_empty); + ESTAT_ADD(rx_discards); + ESTAT_ADD(rx_errors); + ESTAT_ADD(rx_threshold_hit); + + ESTAT_ADD(dma_readq_full); + ESTAT_ADD(dma_read_prioq_full); + ESTAT_ADD(tx_comp_queue_full); + + ESTAT_ADD(ring_set_send_prod_index); + ESTAT_ADD(ring_status_update); + ESTAT_ADD(nic_irqs); + ESTAT_ADD(nic_avoided_irqs); + ESTAT_ADD(nic_tx_threshold_hit); + + ESTAT_ADD(mbuf_lwm_thresh_hit); + + return estats; + } + + static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { + struct tg3 *tp = netdev_priv(dev); + struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_stats; + + stats->rx_packets = old_stats->rx_packets + + get_stat64(&hw_stats->rx_ucast_packets) + + get_stat64(&hw_stats->rx_mcast_packets) + + get_stat64(&hw_stats->rx_bcast_packets); + + stats->tx_packets = old_stats->tx_packets + + get_stat64(&hw_stats->tx_ucast_packets) + + get_stat64(&hw_stats->tx_mcast_packets) + + get_stat64(&hw_stats->tx_bcast_packets); + + stats->rx_bytes = old_stats->rx_bytes + + get_stat64(&hw_stats->rx_octets); + stats->tx_bytes = old_stats->tx_bytes + + get_stat64(&hw_stats->tx_octets); + + stats->rx_errors = old_stats->rx_errors + + get_stat64(&hw_stats->rx_errors); + stats->tx_errors = old_stats->tx_errors + + get_stat64(&hw_stats->tx_errors) + + get_stat64(&hw_stats->tx_mac_errors) + + get_stat64(&hw_stats->tx_carrier_sense_errors) + + get_stat64(&hw_stats->tx_discards); + + stats->multicast = old_stats->multicast + + get_stat64(&hw_stats->rx_mcast_packets); + stats->collisions = old_stats->collisions + + get_stat64(&hw_stats->tx_collisions); + + stats->rx_length_errors = old_stats->rx_length_errors + + get_stat64(&hw_stats->rx_frame_too_long_errors) + + get_stat64(&hw_stats->rx_undersize_packets); + + stats->rx_over_errors = old_stats->rx_over_errors + + get_stat64(&hw_stats->rxbds_empty); + stats->rx_frame_errors = old_stats->rx_frame_errors + + get_stat64(&hw_stats->rx_align_errors); + stats->tx_aborted_errors = old_stats->tx_aborted_errors + + get_stat64(&hw_stats->tx_discards); + stats->tx_carrier_errors = old_stats->tx_carrier_errors + + get_stat64(&hw_stats->tx_carrier_sense_errors); + + stats->rx_crc_errors = old_stats->rx_crc_errors + + calc_crc_errors(tp); + + stats->rx_missed_errors = old_stats->rx_missed_errors + + get_stat64(&hw_stats->rx_discards); + + stats->rx_dropped = tp->rx_dropped; + + return stats; + } + + static inline u32 calc_crc(unsigned char *buf, int len) + { + u32 
reg; + u32 tmp; + int j, k; + + reg = 0xffffffff; + + for (j = 0; j < len; j++) { + reg ^= buf[j]; + + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; + + reg >>= 1; + + if (tmp) + reg ^= 0xedb88320; + } + } + + return ~reg; + } + + static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) + { + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); + } + + static void __tg3_set_rx_mode(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + + #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; + #endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). */ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } + } + + static void tg3_set_rx_mode(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); + } + + static int tg3_get_regs_len(struct net_device *dev) + { + return TG3_REG_BLK_SIZE; + } + + static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) + { + struct tg3 *tp = netdev_priv(dev); + + regs->version = 0; + + memset(_p, 0, TG3_REG_BLK_SIZE); + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; + + tg3_full_lock(tp, 0); + + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); + } + + static int tg3_get_eeprom_len(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; + } + + static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) + { + struct tg3 *tp = netdev_priv(dev); + int ret; + u8 *pd; + u32 i, offset, len, b_offset, b_count; + __be32 val; + + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = TG3_EEPROM_MAGIC; + + if (offset & 3) { + /* adjustments to start on required 4 byte boundary */ + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) { + /* i.e. 
offset=1 len=2 */ + b_count = len; + } + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); + if (ret) + return ret; + memcpy(data, ((char *)&val) + b_offset, b_count); + len -= b_count; + offset += b_count; + eeprom->len += b_count; + } + + /* read bytes up to the last 4 byte boundary */ + pd = &data[eeprom->len]; + for (i = 0; i < (len - (len & 3)); i += 4) { + ret = tg3_nvram_read_be32(tp, offset + i, &val); + if (ret) { + eeprom->len += i; + return ret; + } + memcpy(pd + i, &val, 4); + } + eeprom->len += i; + + if (len & 3) { + /* read last bytes not ending on 4 byte boundary */ + pd = &data[eeprom->len]; + b_count = len & 3; + b_offset = offset + len - b_count; + ret = tg3_nvram_read_be32(tp, b_offset, &val); + if (ret) + return ret; + memcpy(pd, &val, b_count); + eeprom->len += b_count; + } + return 0; + } + + static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); + + static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) + { + struct tg3 *tp = netdev_priv(dev); + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; + __be32 start, end; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + + if ((b_offset = (offset & 3))) { + /* adjustments to start on required 4 byte boundary */ + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); + if (ret) + return ret; + len += b_offset; + offset &= ~3; + if (len < 4) + len = 4; + } + + odd_len = 0; + if (len & 3) { + /* adjustments to end on required 4 byte boundary */ + odd_len = 1; + len = (len + 3) & ~3; + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); + if (ret) + return ret; + } + + buf = data; + if (b_offset || odd_len) { + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (b_offset) + memcpy(buf, &start, 4); + if (odd_len) + memcpy(buf+len-4, &end, 4); + memcpy(buf + b_offset, data, eeprom->len); + } + + ret = tg3_nvram_write_block(tp, offset, len, buf); + + if (buf != data) + kfree(buf); + + return ret; + } + + static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP); + cmd->port = PORT_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + cmd->advertising = tp->link_config.advertising; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } + if (netif_running(dev)) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); + cmd->duplex = tp->link_config.active_duplex; + } else { + ethtool_cmd_speed_set(cmd, 
SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; + } + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = tp->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; + } + + static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (speed != SPEED_100 && + speed != SPEED_10) + return -EINVAL; + } + } + + tg3_full_lock(tp, 0); + + tp->link_config.autoneg = cmd->autoneg; + if (cmd->autoneg == AUTONEG_ENABLE) { + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + } else { + tp->link_config.advertising = 0; + tp->link_config.speed = speed; + tp->link_config.duplex = cmd->duplex; + } + + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + + if (netif_running(dev)) + tg3_setup_phy(tp, 1); + + tg3_full_unlock(tp); + + return 0; + } + + static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + struct tg3 *tp = netdev_priv(dev); + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); + strcpy(info->bus_info, pci_name(tp->pdev)); + } + + static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + wol->wolopts = 0; + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) + wol->wolopts = WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + } + + static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + if ((wol->wolopts & WAKE_MAGIC) && + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) + return -EINVAL; + + device_set_wakeup_enable(dp, wol->wolopts & 
WAKE_MAGIC); + + spin_lock_bh(&tp->lock); + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); + spin_unlock_bh(&tp->lock); + + return 0; + } + + static u32 tg3_get_msglevel(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + return tp->msg_enable; + } + + static void tg3_set_msglevel(struct net_device *dev, u32 value) + { + struct tg3 *tp = netdev_priv(dev); + tp->msg_enable = value; + } + + static int tg3_nway_reset(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + int r; + + if (!netif_running(dev)) + return -EAGAIN; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + return -EINVAL; + + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); + } + + return r; + } + + static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) + { + struct tg3 *tp = netdev_priv(dev); + + ering->rx_max_pending = tp->rx_std_ring_mask; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; + + ering->rx_pending = tp->rx_pending; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + + ering->tx_pending = tp->napi[0].tx_pending; + } + + static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) + { + struct tg3 *tp = netdev_priv(dev); + int i, irq_sync = 0, err = 0; + + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || + (ering->tx_pending > TG3_TX_RING_SIZE - 1) || + (ering->tx_pending <= MAX_SKB_FRAGS) || + (tg3_flag(tp, TSO_BUG) && + (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) + return -EINVAL; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tp->rx_pending = ering->rx_pending; + + if (tg3_flag(tp, MAX_RXPEND_64) && + tp->rx_pending > 63) + tp->rx_pending = 63; + tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].tx_pending = ering->tx_pending; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err) + tg3_phy_start(tp); + + return err; + } + + static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) + { + struct tg3 *tp = netdev_priv(dev); + + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); + + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + epause->rx_pause = 1; + else + epause->rx_pause = 0; + + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + epause->tx_pause = 1; + else + epause->tx_pause = 0; + } + + static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) + { + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + if (tg3_flag(tp, USE_PHYLIB)) { + u32 newadv; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + 
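+ /* A quick recap of the mapping computed below: rx+tx pause is + * advertised as ADVERTISED_Pause alone, rx-only as ADVERTISED_Pause | + * ADVERTISED_Asym_Pause, tx-only as ADVERTISED_Asym_Pause alone, and + * neither as 0. + */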
+ if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + tp->link_config.flowctrl = 0; + if (epause->rx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_RX; + + if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Pause; + } else + newadv = ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + u32 oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) { + /* + * Always renegotiate the link to + * inform our link partner of our + * flow control settings, even if the + * flow control is forced. Let + * tg3_adjust_link() do the final + * flow control setup. + */ + return phy_start_aneg(phydev); + } + } + + if (!epause->autoneg) + tg3_setup_flow_control(tp, 0, 0); + } else { + tp->link_config.orig_advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + tp->link_config.orig_advertising |= newadv; + } + } else { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + } + + return err; + } + + static int tg3_get_sset_count(struct net_device *dev, int sset) + { + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } + } + + static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) + { + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + case ETH_SS_TEST: + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); + break; + default: + WARN_ON(1); /* we need a WARN() */ + break; + } + } + + static int tg3_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) + { + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(tp->dev)) + return -EAGAIN; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON | + LED_CTRL_100MBPS_ON | + LED_CTRL_10MBPS_ON | + LED_CTRL_TRAFFIC_OVERRIDE | + LED_CTRL_TRAFFIC_BLINK | + LED_CTRL_TRAFFIC_LED); + break; + + case ETHTOOL_ID_OFF: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + tw32(MAC_LED_CTRL, tp->led_ctrl); + break; + } + + return 0; + } + + static void tg3_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) + { + struct tg3 *tp = netdev_priv(dev); + memcpy(tmp_stats, 
tg3_get_estats(tp), sizeof(tp->estats)); + } + + static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) + { + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + + error: + kfree(buf); + return NULL; + } + + #define NVRAM_TEST_SIZE 0x100 + #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 + #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 + #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c + #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 + #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 + #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 + #define NVRAM_SELFBOOT_HW_SIZE 0x20 + #define NVRAM_SELFBOOT_DATA_SIZE 0x1c + + static int tg3_test_nvram(struct tg3 *tp) + { + u32 csum, magic, len; + __be32 *buf; + int i, j, k, err = 0, size; + + if (tg3_flag(tp, NO_NVRAM)) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { + if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == + TG3_EEPROM_SB_FORMAT_1) { + switch (magic & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; + break; + case TG3_EEPROM_SB_REVISION_2: + size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; + break; + case TG3_EEPROM_SB_REVISION_3: + size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; + break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; + default: + return -EIO; + } + } else + return 0; + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else + return -EIO; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) + break; + } + if (i < size) + goto out; + + /* Selfboot format */ + magic = be32_to_cpu(buf[0]); + if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + if ((magic & 
TG3_EEPROM_SB_REVISION_MASK) == + TG3_EEPROM_SB_REVISION_2) { + /* For rev 2, the csum doesn't include the MBA. */ + for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) + csum8 += buf8[i]; + for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) + csum8 += buf8[i]; + } else { + for (i = 0; i < size; i++) + csum8 += buf8[i]; + } + + if (csum8 == 0) { + err = 0; + goto out; + } + + err = -EIO; + goto out; + } + + if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + + err = -EIO; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if (csum != le32_to_cpu(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != le32_to_cpu(buf[0xfc/4])) + goto out; + + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + + err = 0; + + out: + kfree(buf); + return err; + } + + #define TG3_SERDES_TIMEOUT_SEC 2 + #define TG3_COPPER_TIMEOUT_SEC 6 + + static int tg3_test_link(struct tg3 *tp) + { + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (netif_carrier_ok(tp->dev)) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; + } + + /* Only test the commonly used registers */ + static int tg3_test_registers(struct tg3 *tp) + { + int i, is_5705, is_5750; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; + #define TG3_FL_5705 0x1 + #define TG3_FL_NOT_5705 0x2 + #define TG3_FL_NOT_5788 0x4 + #define TG3_FL_NOT_5750 0x8 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + 
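+ /* Each entry is { offset, flags, read_mask, write_mask }; the test + * loop below verifies that read_mask bits keep their value while + * write_mask bits accept both all-zeros and all-ones writes. + */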
{ MAC_TX_MODE, 0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. */ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. 
*/ + { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + is_5705 = is_5750 = 0; + if (tg3_flag(tp, 5705_PLUS)) { + is_5705 = 1; + if (tg3_flag(tp, 5750_PLUS)) + is_5750 = 1; + } + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if (tg3_flag(tp, IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. */ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. 
*/ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + + out: + if (netif_msg_hw(tp)) + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; + } + + static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) + { + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; + } + + static int tg3_test_memory(struct tg3 *tp) + { + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x00b50}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5906[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00400}, + { 0x00006000, 0x00400}, + { 0x00008000, 0x01000}, + { 0x00010000, 0x01000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5717[] = { + { 0x00000200, 0x00008}, + { 0x00010000, 0x0a000}, + { 0x00020000, 0x13c00}, + { 0xffffffff, 0x00000} + }, mem_tbl_57765[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x09800}, + { 0x00010000, 0x0a000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tg3_flag(tp, 5717_PLUS)) + mem_tbl = mem_tbl_5717; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + mem_tbl = mem_tbl_57765; + else if (tg3_flag(tp, 5755_PLUS)) + mem_tbl = mem_tbl_5755; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tg3_flag(tp, 5705_PLUS)) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) + break; + } + + return err; + } + + #define TG3_TSO_MSS 500 + + #define TG3_TSO_IP_HDR_LEN 20 + #define TG3_TSO_TCP_HDR_LEN 20 + #define TG3_TSO_TCP_OPT_LEN 12 + + static const u8 tg3_tso_header[] = { + 0x08, 0x00, + 0x45, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x06, 0x00, 0x00, + 0x0a, 0x00, 0x00, 0x01, + 0x0a, 0x00, 0x00, 0x02, + 0x0d, 0x00, 0xe0, 0x00, + 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x02, 0x00, + 0x80, 0x10, 0x10, 0x00, + 0x14, 0x09, 0x00, 0x00, + 0x01, 0x01, 0x08, 0x0a, + 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, + }; + + static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) + { + u32 rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tp->irq_cnt > 1) { + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = &tp->napi[1]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; + + err 
= -EIO; + + tx_len = pktsz; + skb = netdev_alloc_skb(tp->dev, tx_len); + if (!skb) + return -ENOMEM; + + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, 6); + memset(tx_data + 6, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + + if (tso_loopback) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else { + num_pkts = 1; + data_off = ETH_HLEN; + } + + for (i = data_off; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + rnapi->coal_now); + + udelay(10); + + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; + + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, + base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } + + tnapi->tx_prod++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); + + udelay(10); + + /* 350 usec to allow enough time on some 10/100 Mbps devices. 
*/ + for (i = 0; i < 35; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + coal_now); + + udelay(10); + + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); + dev_kfree_skb(skb); + + if (tx_idx != tnapi->tx_prod) + goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; + + if (!tso_loopback) { + if (rx_len != tx_len) + goto out; + + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } + + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_skb = tpr->rx_std_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else + goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_skb->data + i) != (u8) (val & 0xff)) + goto out; + } + } + + err = 0; + + /* tg3_free_rings will unmap and free the rx_skb */ + out: + return err; + } + + #define TG3_STD_LOOPBACK_FAILED 1 + #define TG3_JMB_LOOPBACK_FAILED 2 + #define TG3_TSO_LOOPBACK_FAILED 4 + #define TG3_LOOPBACK_FAILED \ + (TG3_STD_LOOPBACK_FAILED | \ + TG3_JMB_LOOPBACK_FAILED | \ + TG3_TSO_LOOPBACK_FAILED) + + static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) + { + int err = -EIO; + u32 eee_cap; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + + if (!netif_running(tp->dev)) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + err = tg3_reset_hw(tp, 1); + if (err) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } + + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) { + tg3_mac_loopback(tp, true); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[0] |= TG3_STD_LOOPBACK_FAILED; + + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[0] |= TG3_JMB_LOOPBACK_FAILED; + + tg3_mac_loopback(tp, false); + } + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tg3_flag(tp, USE_PHYLIB)) { + int i; + + tg3_phy_lpbk_set(tp, 0, false); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[1] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[1] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[1] |= TG3_JMB_LOOPBACK_FAILED; + + if (do_extlpbk) { + tg3_phy_lpbk_set(tp, 0, true); + + /* All link indications report up, but the hardware + * isn't really ready for about 20 msec. Double it + * to be sure. + */ + mdelay(40); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[2] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[2] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[2] |= TG3_JMB_LOOPBACK_FAILED; + } + + /* Re-enable gphy autopowerdown. */ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + } + + err = (data[0] | data[1] | data[2]) ? -EIO : 0; + + done: + tp->phy_flags |= eee_cap; + + return err; + } + + static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) + { + struct tg3 *tp = netdev_priv(dev); + bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + } + if (!doextlpbk && tg3_test_link(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + data[1] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int err, err2 = 0, irq_sync = 0; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!tg3_flag(tp, 5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tg3_phy_reset(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[2] = 1; + } + + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[3] = 1; + } + + if (doextlpbk) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + + if (tg3_test_loopback(tp, &data[4], doextlpbk)) + etest->flags |= ETH_TEST_FL_FAILED; + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[7] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (!err2) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err2) + 
tg3_phy_start(tp); + } + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tg3_power_down(tp); + + } + + static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct mii_ioctl_data *data = if_mii(ifr); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_mii_ioctl(phydev, ifr, cmd); + } + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = tp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&tp->lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&tp->lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; + } + + static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + { + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; + } + + static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + { + struct tg3 *tp = netdev_priv(dev); + u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; + u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; + + if (!tg3_flag(tp, 5705_PLUS)) { + max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; + max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; + max_stat_coal_ticks = MAX_STAT_COAL_TICKS; + min_stat_coal_ticks = MIN_STAT_COAL_TICKS; + } + + if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || + (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || + (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || + (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || + (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || + (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || + (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || + (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) + return -EINVAL; + + /* No rx interrupts will be generated if both are zero */ + if ((ec->rx_coalesce_usecs == 0) && + (ec->rx_max_coalesced_frames == 0)) + return -EINVAL; + + /* No tx interrupts will be generated if both are zero */ + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + /* Only copy relevant parameters, ignore all others. 
*/ + tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; + tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; + tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; + + if (netif_running(dev)) { + tg3_full_lock(tp, 0); + __tg3_set_coalesce(tp, &tp->coal); + tg3_full_unlock(tp); + } + return 0; + } + + static const struct ethtool_ops tg3_ethtool_ops = { + .get_settings = tg3_get_settings, + .set_settings = tg3_set_settings, + .get_drvinfo = tg3_get_drvinfo, + .get_regs_len = tg3_get_regs_len, + .get_regs = tg3_get_regs, + .get_wol = tg3_get_wol, + .set_wol = tg3_set_wol, + .get_msglevel = tg3_get_msglevel, + .set_msglevel = tg3_set_msglevel, + .nway_reset = tg3_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tg3_get_eeprom_len, + .get_eeprom = tg3_get_eeprom, + .set_eeprom = tg3_set_eeprom, + .get_ringparam = tg3_get_ringparam, + .set_ringparam = tg3_set_ringparam, + .get_pauseparam = tg3_get_pauseparam, + .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, + .set_phys_id = tg3_set_phys_id, + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, + }; + + static void __devinit tg3_get_eeprom_size(struct tg3 *tp) + { + u32 cursize, val, magic; + + tp->nvram_size = EEPROM_CHIP_SIZE; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; + + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + return; + + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. + */ + cursize = 0x10; + + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) + break; + + cursize <<= 1; + } + + tp->nvram_size = cursize; + } + + static void __devinit tg3_get_nvram_size(struct tg3 *tp) + { + u32 val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. 
+ */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + } + + static void __devinit tg3_get_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; + } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } + } + + static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) + { + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } + } + + static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + } + + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + } + + static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + 
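+ /* The strap bits left after masking identify the attached flash + * part; each case below fills in the vendor, buffering, page size + * and device size accordingly. + */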
switch (nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } + } + + static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } + } + + static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + 
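+ /* ST M45PE parts program in fixed 256-byte pages. */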
tp->nvram_pagesize = 256; + break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + } + } + } + + static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) + { + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + } + + static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + + static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + 
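+ /* EEPROM parts carry no flash page structure; as in the other + * *_nvram_info helpers, the page size is set to the whole chip. + */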
tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = 
TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + /* Chips other than 5700/5701 use the NVRAM for fetching info. */ + static void __devinit tg3_nvram_init(struct tg3 *tp) + { + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); + + msleep(1); + + /* Enable seeprom accesses. 
*/ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); + + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; + } + tg3_enable_nvram_access(tp); + + tp->nvram_size = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + + tg3_get_eeprom_size(tp); + } + } + + static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) + { + int i, j, rc = 0; + u32 val; + + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; + + addr = offset + i; + + memcpy(&data, buf + i, 4); + + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). 
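
The comment above is the key to this write path: the SEEPROM interface wants the bytes in the opposite of native order, so the driver undoes tg3_nvram_read_be32() by chaining be32_to_cpu() and swab32(). A minimal user-space sketch of just that arithmetic, with ntohl() standing in for be32_to_cpu() and a hand-rolled swab32() (illustrative only, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Byte-swap a 32-bit value, mirroring the kernel's swab32(). */
    static uint32_t swab32_sketch(uint32_t x)
    {
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
    }

    int main(void)
    {
        const uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
        uint32_t be;

        memcpy(&be, buf, 4);          /* big-endian image, like __be32 */
        uint32_t cpu = ntohl(be);     /* stands in for be32_to_cpu() */
        printf("cpu order:  0x%08x\n", cpu);                 /* 0x12345678 */
        printf("to SEEPROM: 0x%08x\n", swab32_sketch(cpu));  /* 0x78563412 */
        return 0;
    }

On any host the four buffer bytes come out as the little-endian image 0x78563412, which is what lands in GRC_EEPROM_DATA above.
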
+ */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; + } + } + + return rc; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) + { + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; + + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + while (len) { + int j; + u32 phy_addr, page_off, size; + + phy_addr = offset & ~pagemask; + + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; + + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; + + len -= size; + + memcpy(tmp + page_off, buf, size); + + offset = offset + (pagesize - page_off); + + tg3_enable_nvram_access(tp); + + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Issue another write enable to start the write. 
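
The unbuffered path above is a textbook flash read-modify-write: fetch the page overlapping the target range, merge in the new bytes, erase the page, then reprogram it in full. A self-contained sketch against an in-memory array (buffer advance and error handling simplified relative to the driver loop):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGESIZE 64    /* illustrative; real parts use e.g. 256 or 264 */

    static uint8_t flash[4 * PAGESIZE];    /* in-memory stand-in for the part */

    /* Read the whole page, merge the new bytes, "erase", reprogram. */
    static void rmw_write(uint32_t offset, const uint8_t *buf, uint32_t len)
    {
        const uint32_t pagemask = PAGESIZE - 1;
        uint8_t tmp[PAGESIZE];

        while (len) {
            uint32_t page_base = offset & ~pagemask;
            uint32_t page_off = offset & pagemask;
            uint32_t size = PAGESIZE - page_off;

            if (size > len)
                size = len;

            memcpy(tmp, flash + page_base, PAGESIZE);   /* read page   */
            memcpy(tmp + page_off, buf, size);          /* merge bytes */
            memset(flash + page_base, 0xff, PAGESIZE);  /* erase page  */
            memcpy(flash + page_base, tmp, PAGESIZE);   /* reprogram   */

            buf += size;
            offset += size;
            len -= size;
        }
    }

    int main(void)
    {
        const uint8_t payload[8] = "ABCDEFGH";

        rmw_write(PAGESIZE - 4, payload, sizeof(payload)); /* page crossing */
        printf("%c %c\n", flash[PAGESIZE - 4], flash[PAGESIZE]); /* A E */
        return 0;
    }
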
*/ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + for (j = 0; j < pagesize; j += 4) { + __be32 data; + + data = *((__be32 *) (tmp + j)); + + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + tw32(NVRAM_ADDR, phy_addr + j); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; + + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + if (ret) + break; + } + + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); + + kfree(tmp); + + return ret; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) + { + int i, ret = 0; + + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; + + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + page_off = offset % tp->nvram_pagesize; + + phy_addr = tg3_nvram_phys_addr(tp, offset); + + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + + if ((ret = tg3_nvram_exec_cmd(tp, + NVRAM_CMD_WREN | NVRAM_CMD_GO | + NVRAM_CMD_DONE))) + + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + return ret; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) + { + int ret; + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } + + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); + } + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + } + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } + + return ret; + } + + struct subsys_tbl_ent { + u16 subsys_vendor, subsys_devid; + u32 phy_id; + }; + + static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { + /* Broadcom boards. 
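
The buffered path instead frames each dword with NVRAM_CMD_FIRST/NVRAM_CMD_LAST so the controller knows where a page program starts and ends. A sketch of just the per-word flag selection, mirroring the conditions in the loop above (the page size is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGESIZE 128    /* illustrative page size, not the chip's */

    int main(void)
    {
        uint32_t offset = 120, len = 16;    /* crosses a page boundary */

        for (uint32_t i = 0; i < len; i += 4, offset += 4) {
            uint32_t page_off = offset % PAGESIZE;
            int first = (page_off == 0) || (i == 0);
            int last = (page_off == PAGESIZE - 4) || (i == len - 4);

            printf("word @%3u: %s%s\n", offset,
                   first ? "FIRST " : "", last ? "LAST" : "");
        }
        return 0;
    }
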
*/ + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, + + /* 3com boards. */ + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, + + /* DELL boards. */ + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, + + /* Compaq boards. */ + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, + + /* IBM boards. */ + { TG3PCI_SUBVENDOR_ID_IBM, + TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } + }; + + static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) + { + int i; + + for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { + if ((subsys_id_to_phy_id[i].subsys_vendor == + tp->pdev->subsystem_vendor) && + (subsys_id_to_phy_id[i].subsys_devid == + tp->pdev->subsystem_device)) + return &subsys_id_to_phy_id[i]; + } + return NULL; + } + + static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) + { + u32 val; + + tp->phy_id = TG3_PHY_ID_INVALID; + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + /* Assume an onboard device and WOL capable by default. 
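
tg3_lookup_by_subsys() below is a plain linear scan of that table, keyed on the PCI subsystem vendor/device pair. A sketch of the same pattern (the IDs here are hypothetical placeholders, not entries copied from the real table):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct subsys_ent {
        uint16_t vendor, devid;
        uint32_t phy_id;
    };

    /* Hypothetical entries, for illustration only. */
    static const struct subsys_ent table[] = {
        { 0x14e4, 0x1644, 0x60008160 },
        { 0x10b7, 0x1000, 0x60008160 },
    };

    static const struct subsys_ent *lookup(uint16_t vendor, uint16_t devid)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].vendor == vendor && table[i].devid == devid)
                return &table[i];
        return NULL;    /* caller falls back to probing the PHY itself */
    }

    int main(void)
    {
        const struct subsys_ent *p = lookup(0x10b7, 0x1000);
        printf("phy_id: 0x%08x\n", p ? p->phy_id : 0);
        return 0;
    }
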
*/ + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) + tg3_flag_set(tp, ASPM_WORKAROUND); + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + goto done; + } + + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg, led_cfg; + u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; + int eeprom_phy_serdes = 0; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + tp->nic_sram_data_cfg = nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); + ver >>= NIC_SRAM_DATA_VER_SHIFT; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && + (ver > 0) && (ver < 0x100)) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == + NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) + eeprom_phy_serdes = 1; + + tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); + if (nic_phy_id != 0) { + u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; + u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; + + eeprom_phy_id = (id1 >> 16) << 10; + eeprom_phy_id |= (id2 & 0xfc00) << 16; + eeprom_phy_id |= (id2 & 0x03ff) << 0; + } else + eeprom_phy_id = 0; + + tp->phy_id = eeprom_phy_id; + if (eeprom_phy_serdes) { + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; + } + + if (tg3_flag(tp, 5750_PLUS)) + led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | + SHASTA_EXT_LED_MODE_MASK); + else + led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; + + switch (led_cfg) { + default: + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_MAC: + tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. 
+			 */
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5700 ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5701)
+				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+			break;
+
+		case SHASTA_EXT_LED_SHARED:
+			tp->led_ctrl = LED_CTRL_MODE_SHARED;
+			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+			break;
+
+		case SHASTA_EXT_LED_MAC:
+			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+			break;
+
+		case SHASTA_EXT_LED_COMBO:
+			tp->led_ctrl = LED_CTRL_MODE_COMBO;
+			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+			break;
+
+		}
+
+		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+			tg3_flag_set(tp, EEPROM_WRITE_PROT);
+			if ((tp->pdev->subsystem_vendor ==
+			     PCI_VENDOR_ID_ARIMA) &&
+			    (tp->pdev->subsystem_device == 0x205a ||
+			     tp->pdev->subsystem_device == 0x2063))
+				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+		} else {
+			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+			tg3_flag_set(tp, IS_NIC);
+		}
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+			tg3_flag_set(tp, ENABLE_ASF);
+			if (tg3_flag(tp, 5750_PLUS))
+				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+		}
+
+		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
+		    tg3_flag(tp, 5750_PLUS))
+			tg3_flag_set(tp, ENABLE_APE);
+
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
+		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+			tg3_flag_clear(tp, WOL_CAP);
+
+		if (tg3_flag(tp, WOL_CAP) &&
+		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+			tg3_flag_set(tp, WOL_ENABLE);
+			device_set_wakeup_enable(&tp->pdev->dev, true);
+		}
+
+		if (cfg2 & (1 << 17))
+			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+
+		/* serdes signal pre-emphasis in register 0x590 set by */
+		/* bootcode if bit 18 is set */
+		if (cfg2 & (1 << 18))
+			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
+
+		if ((tg3_flag(tp, 57765_PLUS) ||
+		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
+
+		if (tg3_flag(tp, PCI_EXPRESS) &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+		    !tg3_flag(tp, 57765_PLUS)) {
+			u32 cfg3;
+
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+				tg3_flag_set(tp, ASPM_WORKAROUND);
+		}
+
+		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+	}
+done:
+	if (tg3_flag(tp, WOL_CAP))
+		device_set_wakeup_enable(&tp->pdev->dev,
+					 tg3_flag(tp, WOL_ENABLE));
+	else
+		device_set_wakeup_capable(&tp->pdev->dev, false);
+}
+
+static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+	int i;
+	u32 val;
+
+	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+	tw32(OTP_CTRL, cmd);
+
+	/* Wait for up to 1 ms for command to execute. */
+	for (i = 0; i < 100; i++) {
+		val = tr32(OTP_STATUS);
+		if (val & OTP_STATUS_CMD_DONE)
+			break;
+		udelay(10);
+	}
+
+	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip. The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+{
+	u32 bhalf_otp, thalf_otp;
+
+	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+		return 0;
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	thalf_otp = tr32(OTP_READ_DATA);
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	bhalf_otp = tr32(OTP_READ_DATA);
+
+	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+	u32 adv = ADVERTISED_Autoneg |
+		  ADVERTISED_Pause;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+		adv |= ADVERTISED_1000baseT_Half |
+		       ADVERTISED_1000baseT_Full;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		adv |= ADVERTISED_100baseT_Half |
+		       ADVERTISED_100baseT_Full |
+		       ADVERTISED_10baseT_Half |
+		       ADVERTISED_10baseT_Full |
+		       ADVERTISED_TP;
+	else
+		adv |= ADVERTISED_FIBRE;
+
+	tp->link_config.advertising = adv;
+	tp->link_config.speed = SPEED_INVALID;
+	tp->link_config.duplex = DUPLEX_INVALID;
+	tp->link_config.autoneg = AUTONEG_ENABLE;
+	tp->link_config.active_speed = SPEED_INVALID;
+	tp->link_config.active_duplex = DUPLEX_INVALID;
+	tp->link_config.orig_speed = SPEED_INVALID;
+	tp->link_config.orig_duplex = DUPLEX_INVALID;
+	tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+	u32 hw_phy_id_1, hw_phy_id_2;
+	u32 hw_phy_id, hw_phy_id_masked;
+	int err;
+
+	/* flow control autonegotiation is default behavior */
+	tg3_flag_set(tp, PAUSE_AUTONEG);
+	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+	if (tg3_flag(tp, USE_PHYLIB))
+		return tg3_phy_init(tp);
+
+	/* Reading the PHY ID register can conflict with ASF
+	 * firmware access to the PHY hardware.
+	 */
+	err = 0;
+	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
+		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
+	} else {
+		/* Now read the physical PHY_ID from the chip and verify
+		 * that it is sane. If it doesn't look good, we fall back
+		 * to either the hard-coded table based PHY_ID and failing
+		 * that the value found in the eeprom area.
+		 */
+		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
+		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
+		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
+
+		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
+	}
+
+	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
+		tp->phy_id = hw_phy_id;
+		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
+			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+		else
+			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
+	} else {
+		if (tp->phy_id != TG3_PHY_ID_INVALID) {
+			/* Do nothing, phy ID already set up in
+			 * tg3_get_eeprom_hw_cfg().
+			 */
+		} else {
+			struct subsys_tbl_ent *p;
+
+			/* No eeprom signature? Try the hardcoded
+			 * subsys device table.
+ */ + p = tg3_lookup_by_subsys(tp); + if (!p) + return -ENODEV; + + tp->phy_id = p->phy_id; + if (!tp->phy_id || + tp->phy_id == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + } + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + + tg3_phy_init_link_config(tp); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, mask; + + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + goto skip_phy_reset; + + err = tg3_phy_reset(tp); + if (err) + return err; + + tg3_phy_set_wirespeed(tp); + + mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); + if (!tg3_copper_is_advertising_all(tp, mask)) { + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + + skip_phy_reset: + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + err = tg3_init_5401phy_dsp(tp); + } + + return err; + } + + static void __devinit tg3_read_vpd(struct tg3 *tp) + { + u8 *vpd_data; + unsigned int block_end, rosize, len; + u32 vpdlen; + int j, i = 0; + + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; + + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + rosize = pci_vpd_lrdt_size(&vpd_data[i]); + block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > vpdlen) + goto out_not_found; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + } + + partno: + i = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_PARTNO); + if (i < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[i]); + + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len > TG3_BPN_SIZE || + (len + i) > vpdlen) + goto out_not_found; + + memcpy(tp->board_part_number, &vpd_data[i], len); + + out_not_found: + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + + out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + 
strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + strcpy(tp->board_part_number, "BCM95906"); + } else { + nomatch: + strcpy(tp->board_part_number, "none"); + } + } + + static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) + { + u32 val; + + if (tg3_nvram_read(tp, offset, &val) || + (val & 0xfc000000) != 0x0c000000 || + tg3_nvram_read(tp, offset + 4, &val) || + val != 0) + return 0; + + return 1; + } + + static void __devinit tg3_read_bc_ver(struct tg3 *tp) + { + u32 val, offset, start, ver_offset; + int i, dst_off; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + dst_off = strlen(tp->fw_ver); + + if (newver) { + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } + } + + static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) + { + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); + } + + static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) + { + u32 offset, major, minor, build; + + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); + + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) + return; + + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case 
TG3_EEPROM_SB_REVISION_3: + offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_4: + offset = TG3_EEPROM_SB_F1R4_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_5: + offset = TG3_EEPROM_SB_F1R5_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; + default: + return; + } + + if (tg3_nvram_read(tp, offset, &val)) + return; + + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; + + if (minor > 99 || build > 26) + return; + + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); + + if (build > 0) { + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; + } + } + + static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) + { + u32 val, offset, start; + int i, vlen; + + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == TG3_NVM_DIR_END) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + start = 0x08000000; + else if (tg3_nvram_read(tp, offset - 4, &start)) + return; + + if (tg3_nvram_read(tp, offset + 4, &offset) || + !tg3_fw_img_is_valid(tp, offset) || + tg3_nvram_read(tp, offset + 8, &val)) + return; + + offset += val - start; + + vlen = strlen(tp->fw_ver); + + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) + return; + + offset += sizeof(v); + + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); + break; + } + + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } + } + + static void __devinit tg3_read_dash_ver(struct tg3 *tp) + { + int vlen; + u32 apedata; + char *fwtype; + + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { + tg3_flag_set(tp, APE_HAS_NCSI); + fwtype = "NCSI"; + } else { + fwtype = "DASH"; + } + + vlen = strlen(tp->fw_ver); + + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); + } + + static void __devinit tg3_read_fw_ver(struct tg3 *tp) + { + u32 val; + bool vpd_vers = false; + + if (tp->fw_ver[0] != 0) + vpd_vers = true; + + if (tg3_flag(tp, NO_NVRAM)) { + strcat(tp->fw_ver, "sb"); + return; + } + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + else + return; + + if (vpd_vers) + goto done; + + if (tg3_flag(tp, ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF)) + tg3_read_dash_ver(tp); + } else if 
(tg3_flag(tp, ENABLE_ASF)) { + tg3_read_mgmtfw_ver(tp); + } + + done: + tp->fw_ver[TG3_VER_SIZE - 1] = 0; + } + + static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); + + static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) + { + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; + else + return TG3_RX_RET_MAX_SIZE_5705; + } + + static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, + }; + + static int __devinit tg3_get_invariants(struct tg3 *tp) + { + u32 misc_ctrl_reg; + u32 pci_state_reg, grc_misc_cfg; + u32 val; + u16 pci_cmd; + int err; + + /* Force memory write invalidate off. If we leave it on, + * then on 5700_BX chips we have to enable a workaround. + * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary + * to match the cacheline size. The Broadcom driver have this + * workaround but turns MWI off all the times so never uses + * it. This seems to suggest that the workaround is insufficient. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. + */ + pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + tp->pci_chip_rev_id = (misc_ctrl_reg >> + MISC_HOST_CTRL_CHIPREV_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + u32 prod_id_asic_rev; + + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN2_PRODID_ASICREV, + &prod_id_asic_rev); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN15_PRODID_ASICREV, + &prod_id_asic_rev); + else + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + + tp->pci_chip_rev_id = prod_id_asic_rev; + } + + /* Wrong chip ID in 5752 A0. This code can be removed later + * as A0 is not in production. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) + tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, + * we need to disable memory and use config. cycles + * only to access all registers. The 5702/03 chips + * can mistakenly decode the special cycles from the + * ICH chipsets as memory write cycles, causing corruption + * of register and memory space. Only certain ICH bridges + * will drive special cycles with non-zero data during the + * address phase which can fall within the 5703's address + * range. 
This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || + (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + if (bridge->revision > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + tg3_flag_set(tp, ICH_WORKAROUND); + pci_dev_put(bridge); + break; + } + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + } bridge_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, + { }, + }; + struct tg3_dev_id *pci_id = &bridge_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, + pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 5701_DMA_BUG); + pci_dev_put(bridge); + break; + } + } + } + + /* The EPB bridge inside 5714, 5715, and 5780 cannot support + * DMA addresses > 40-bit. This bridge may have other additional + * 57xx devices behind it in some 4-port NIC designs for example. + * Any tg3 device found behind the bridge will also need the 40-bit + * DMA workaround. 
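
Both bridge scans above reduce to one question: does the NIC's bus number fall inside the bridge's secondary..subordinate window? A sketch of that containment test (the field names are stand-ins for the pci_bus members used above):

    #include <stdio.h>

    struct bridge_window {
        int secondary;      /* first bus number behind the bridge */
        int subordinate;    /* last bus number behind the bridge */
    };

    /* A device needs the bridge's DMA workaround iff its bus number
     * falls inside the bridge's window, as in the scans above. */
    static int behind(const struct bridge_window *w, int busnr)
    {
        return busnr >= w->secondary && busnr <= w->subordinate;
    }

    int main(void)
    {
        struct bridge_window epb = { .secondary = 2, .subordinate = 4 };

        printf("bus 3: %d, bus 7: %d\n", behind(&epb, 3), behind(&epb, 7));
        return 0;
    }
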
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); + tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); + } else { + struct pci_dev *bridge = NULL; + + do { + bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_EPB, + bridge); + if (bridge && bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + pci_dev_put(bridge); + break; + } + } while (bridge); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + tp->pdev_peer = tg3_find_peer(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); + + /* Determine TSO capabilities */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + tg3_flag_clear(tp, TSO_BUG); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tg3_flag_set(tp, TSO_CAPABLE); + else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + + tp->irq_max = 1; + + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && + tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && + tp->pdev_peer == tp->pdev)) + tg3_flag_clear(tp, SUPPORT_MSI); + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_flag_set(tp, 1SHOT_MSI); + } + + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); + tp->irq_max = TG3_IRQ_MAX_VECS; + } + } + + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_flag_set(tp, 4K_FIFO_LIMIT); + + if (tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); + + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + + if (pci_is_pcie(tp->pdev)) { + u16 lnkctl; + + tg3_flag_set(tp, PCI_EXPRESS); + + tp->pcie_readrq = 4096; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) + tg3_flag_set(tp, CLKREQ_BUG); + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { + tg3_flag_set(tp, L1PLLPD_EN); + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + /* BCM5785 devices are effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. 
+ */ + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + dev_err(&tp->pdev->dev, + "Cannot find PCI-X capability, aborting\n"); + return -EIO; + } + + if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) + tg3_flag_set(tp, PCIX_MODE); + } + + /* If we have an AMD 762 or VIA K8T800 chipset, write + * reordering to the mailbox registers done by the host + * controller can cause major troubles. We read back from + * every mailbox register write to force the writes to be + * posted to the chip in order. + */ + if (pci_dev_present(tg3_write_reorder_chipsets) && + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + &tp->pci_cacheline_sz); + pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, + &tp->pci_lat_timer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + tp->pci_lat_timer < 64) { + tp->pci_lat_timer = 64; + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + + /* Important! -- It is critical that the PCI-X hw workaround + * situation is decided before the first MMIO register access. + */ + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { + /* 5700 BX chips need to have their TX producer index + * mailboxes written twice to workaround a bug. + */ + tg3_flag_set(tp, TXD_MBOX_HWBUG); + + /* If we are in PCI-X mode, enable register write workaround. + * + * The workaround is to use indirect register accesses + * for all chip writes not to mailbox registers. + */ + if (tg3_flag(tp, PCIX_MODE)) { + u32 pm_reg; + + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + + /* The chip can have it's power management PCI config + * space registers clobbered due to this bug. + * So explicitly force the chip into D0 here. + */ + pci_read_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + &pm_reg); + pm_reg &= ~PCI_PM_CTRL_STATE_MASK; + pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; + pci_write_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + pm_reg); + + /* Also, force SERR#/PERR# in PCI command. */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + } + + if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) + tg3_flag_set(tp, PCI_HIGH_SPEED); + if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) + tg3_flag_set(tp, PCI_32BIT); + + /* Chip-specific fixup from Broadcom driver */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && + (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { + pci_state_reg |= PCISTATE_RETRY_SAME_DMA; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); + } + + /* Default fast path register access methods */ + tp->read32 = tg3_read32; + tp->write32 = tg3_write32; + tp->read32_mbox = tg3_read32; + tp->write32_mbox = tg3_write32; + tp->write32_tx_mbox = tg3_write32; + tp->write32_rx_mbox = tg3_write32; + + /* Various workaround register access methods */ + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) + tp->write32 = tg3_write_indirect_reg32; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || + (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { + /* + * Back to back register writes can cause problems on these + * chips, the workaround is to read back all reg writes + * except those to mailbox regs. + * + * See tg3_write_indirect_reg32(). 
+ */ + tp->write32 = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, ICH_WORKAROUND)) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = NULL; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->read32_mbox = tg3_read32_mbox_5906; + tp->write32_mbox = tg3_write32_mbox_5906; + tp->write32_tx_mbox = tg3_write32_mbox_5906; + tp->write32_rx_mbox = tg3_write32_mbox_5906; + } + + if (tp->write32 == tg3_write_indirect_reg32 || + (tg3_flag(tp, PCIX_MODE) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, &val); + tp->pci_fn = val & 0x7; + } else { + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + } + + /* Get eeprom hw config before calling tg3_set_power_state(). + * In particular, the TG3_FLAG_IS_NIC flag must be + * determined before calling tg3_set_power_state() so that + * we know whether or not to switch out of Vaux power. + * When the flag is set, it means that GPIO1 is used for eeprom + * write protect and also implies that it is a LOM where GPIOs + * are not used to switch power. + */ + tg3_get_eeprom_hw_cfg(tp); + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + + tg3_ape_lock_init(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); + + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. + * It is also used as eeprom write protect on LOMs. + */ + tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + /* Unused GPIO3 must be driven as output on 5752 because there + * are no pull-up resistors on unused GPIO pins. 
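
The block above wires up per-chip register accessors through function pointers chosen once at probe time; the flush variants force posted writes to complete by reading the register back. A user-space sketch of both ideas, with plain memory standing in for the MMIO window:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[64];    /* stand-in for the device's registers */

    static void write32_direct(uint32_t off, uint32_t val)
    {
        regs[off / 4] = val;
    }

    static void write32_flush(uint32_t off, uint32_t val)
    {
        volatile uint32_t *reg = &regs[off / 4];

        *reg = val;
        (void)*reg;    /* read back: forces the posted write out */
    }

    struct dev_ops {
        void (*write32)(uint32_t off, uint32_t val);
    };

    int main(void)
    {
        int chip_needs_flush = 1;    /* would come from chip rev checks */
        struct dev_ops ops = {
            .write32 = chip_needs_flush ? write32_flush : write32_direct,
        };

        ops.write32(8, 0xdeadbeef);
        printf("reg[2] = 0x%08x\n", regs[2]);
        return 0;
    }
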
+ */ + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tg3_flag(tp, IS_NIC)) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + + /* Derive initial jumbo mode from MTU assigned in + * ether_setup() via the alloc_etherdev() call + */ + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); + + /* Determine WakeOnLan speed to use. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { + tg3_flag_clear(tp, WOL_SPEED_100MB); + } else { + tg3_flag_set(tp, WOL_SPEED_100MB); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->phy_flags |= TG3_PHYFLG_IS_FET; + + /* A few boards don't want Ethernet@WireSpeed phy feature */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; + + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && + !tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && + tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; + } else + tp->phy_flags |= TG3_PHYFLG_BER_BUG; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + tp->phy_otp = tg3_read_otp_phycfg(tp); + if (tp->phy_otp == 0) + tp->phy_otp = TG3_OTP_DEFAULT; + } + + if (tg3_flag(tp, CPMU_PRESENT)) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + + tp->coalesce_mode = 0; + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) + tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + + /* Set these bits to enable statistics workaround. 
*/ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_flag_set(tp, USE_PHYLIB); + + err = tg3_mdio_init(tp); + if (err) + return err; + + /* Initialize data/descriptor byte/word swapping. */ + val = tr32(GRC_MODE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + + tw32(GRC_MODE, val | tp->grc_mode); + + tg3_switch_clocks(tp); + + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { + u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); + + if (chiprevid == CHIPREV_ID_5701_A0 || + chiprevid == CHIPREV_ID_5701_B0 || + chiprevid == CHIPREV_ID_5701_B2 || + chiprevid == CHIPREV_ID_5701_B5) { + void __iomem *sram_base; + + /* Write some dummy words into the SRAM status block + * area, see if it reads back correctly. If the return + * value is bad, force enable the PCIX workaround. + */ + sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; + + writel(0x00000000, sram_base); + writel(0x00000000, sram_base + 4); + writel(0xffffffff, sram_base + 4); + if (readl(sram_base) != 0x00000000) + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + } + } + + udelay(50); + tg3_nvram_init(tp); + + grc_misc_cfg = tr32(GRC_MISC_CFG); + grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || + grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) + tg3_flag_set(tp, IS_5788); + + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + + /* Preserve the APE MAC_MODE bits */ + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = 0; + + /* these are limited to 10/100 only */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || + (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; + + err = tg3_phy_probe(tp); + if (err) { + dev_err(&tp->pdev->dev, "phy probe 
failed, err %d\n", err); + /* ... but do not return immediately ... */ + tg3_mdio_fini(tp); + } + + tg3_read_vpd(tp); + tg3_read_fw_ver(tp); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + else + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } + + /* 5700 {AX,BX} chips have a broken status block link + * change bit implementation, so we must use the + * status register in those cases. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tg3_flag_set(tp, USE_LINKCHG_REG); + else + tg3_flag_clear(tp, USE_LINKCHG_REG); + + /* The led_ctrl is set during tg3_phy_probe, here we might + * have to force the link status polling mechanism based + * upon subsystem IDs. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tg3_flag_set(tp, USE_LINKCHG_REG); + } + + /* For all SERDES we poll the MAC status register. */ + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + tg3_flag_set(tp, POLL_SERDES); + else + tg3_flag_clear(tp, POLL_SERDES); + + tp->rx_offset = NET_IP_ALIGN; + tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + tg3_flag(tp, PCIX_MODE)) { + tp->rx_offset = 0; + #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + tp->rx_copy_thresh = ~(u16)0; + #endif + } + + tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; + tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; + tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; + + tp->rx_std_max_post = tp->rx_std_ring_mask + 1; + + /* Increment the rx prod index on the rx std ring by at most + * 8 for these chips to workaround hw errata. 
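
The ring masks set up above rely on the ring sizes being powers of two, so a producer index can be wrapped with a cheap AND instead of a modulo. A sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ring_size = 512;           /* must be a power of two */
        uint32_t mask = ring_size - 1;      /* like tp->rx_std_ring_mask */

        /* Wrapping an index with the mask is equivalent to (and
         * cheaper than) taking it modulo the ring size. */
        for (uint32_t idx = 510; idx < 515; idx++)
            printf("%u -> %u (mod: %u)\n", idx, idx & mask,
                   idx % ring_size);
        return 0;
    }
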
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_std_max_post = 8; + + if (tg3_flag(tp, ASPM_WORKAROUND)) + tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & + PCIE_PWR_MGMT_L1_THRESH_MSK; + + return err; + } + + #ifdef CONFIG_SPARC + static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + struct pci_dev *pdev = tp->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == 6) { + memcpy(dev->dev_addr, addr, 6); + memcpy(dev->perm_addr, dev->dev_addr, 6); + return 0; + } + return -ENODEV; + } + + static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + + memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->perm_addr, idprom->id_ethaddr, 6); + return 0; + } + #endif + + static int __devinit tg3_get_device_address(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + u32 hi, lo, mac_offset; + int addr_ok = 0; + + #ifdef CONFIG_SPARC + if (!tg3_get_macaddr_sparc(tp)) + return 0; + #endif + + mac_offset = 0x7c; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + mac_offset = 0xcc; + if (tg3_nvram_lock(tp)) + tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); + else + tg3_nvram_unlock(tp); + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) + mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mac_offset = 0x10; + + /* First try to get it from MAC address mailbox. */ + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); + if ((hi >> 16) == 0x484b) { + dev->dev_addr[0] = (hi >> 8) & 0xff; + dev->dev_addr[1] = (hi >> 0) & 0xff; + + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[5] = (lo >> 0) & 0xff; + + /* Some old bootcode may report a 0 MAC address in SRAM */ + addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + } + if (!addr_ok) { + /* Next, try NVRAM. */ + if (!tg3_flag(tp, NO_NVRAM) && + !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { + memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); + memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + } + /* Finally just fetch it out of the MAC control regs. 
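+ * MAC_ADDR_0_HIGH supplies bytes 0-1 and MAC_ADDR_0_LOW bytes 2-5.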
*/ + else { + hi = tr32(MAC_ADDR_0_HIGH); + lo = tr32(MAC_ADDR_0_LOW); + + dev->dev_addr[5] = lo & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[1] = hi & 0xff; + dev->dev_addr[0] = (hi >> 8) & 0xff; + } + } + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { + #ifdef CONFIG_SPARC + if (!tg3_get_default_macaddr_sparc(tp)) + return 0; + #endif + return -EINVAL; + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + return 0; + } + + #define BOUNDARY_SINGLE_CACHELINE 1 + #define BOUNDARY_MULTI_CACHELINE 2 + + static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) + { + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + !tg3_flag(tp, PCI_EXPRESS)) + goto out; + + #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; + #else + #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; + #else + goal = 0; + #endif + #endif + + if (tg3_flag(tp, 57765_PLUS)) { + val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + goto out; + } + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. 
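+ * + * The switch statements below pick the read/write boundary bits per bus + * type (PCI-X, PCI Express, legacy PCI) and cache-line size.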
+ */ + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + } + } else if (tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + } + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + } + } + + out: + return val; + } + + static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) + { + struct tg3_internal_buffer_desc test_desc; + u32 sram_dma_descs; + int i, ret; + + sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; + + tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); + tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); + tw32(RDMAC_STATUS, 0); + tw32(WDMAC_STATUS, 0); + + tw32(BUFMGR_MODE, 0); + tw32(FTQ_RESET, 0); + + test_desc.addr_hi = ((u64) buf_dma) >> 32; + test_desc.addr_lo = buf_dma & 0xffffffff; + test_desc.nic_mbuf = 0x00002100; + test_desc.len = size; + + /* + * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz + * the *second* time the tg3 driver was getting loaded after an + * initial scan. + * + * Broadcom tells me: + * ...the DMA engine is connected to the GRC block and a DMA + * reset may affect the GRC block in some unpredictable way... + * The behavior of resets to individual blocks has not been tested. + * + * Broadcom noted the GRC reset will also reset all sub-components. 
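+ * For that reason no DMA block reset is issued here; the DMA FIFOs and + * status registers are simply cleared above.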
+ */ + if (to_device) { + test_desc.cqid_sqid = (13 << 8) | 2; + + tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); + udelay(40); + } else { + test_desc.cqid_sqid = (16 << 8) | 7; + + tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); + udelay(40); + } + test_desc.flags = 0x00000005; + + for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { + u32 val; + + val = *(((u32 *)&test_desc) + i); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, + sram_dma_descs + (i * sizeof(u32))); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + } + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + + if (to_device) + tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); + else + tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); + + ret = -ENODEV; + for (i = 0; i < 40; i++) { + u32 val; + + if (to_device) + val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); + else + val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); + if ((val & 0xffff) == sram_dma_descs) { + ret = 0; + break; + } + + udelay(100); + } + + return ret; + } + + #define TEST_BUFFER_SIZE 0x2000 + + static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, + }; + + static int __devinit tg3_test_dma(struct tg3 *tp) + { + dma_addr_t buf_dma; + u32 *buf, saved_dma_rwctrl; + int ret = 0; + + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_nofree; + } + + tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | + (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); + + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); + + if (tg3_flag(tp, 57765_PLUS)) + goto out; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* DMA read watermark not used on PCIE */ + tp->dma_rwctrl |= 0x00180000; + } else if (!tg3_flag(tp, PCIX_MODE)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) + tp->dma_rwctrl |= 0x003f0000; + else + tp->dma_rwctrl |= 0x003f000f; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); + u32 read_water = 0x7; + + /* If the 5704 is behind the EPB bridge, we can + * do the less restrictive ONE_DMA workaround for + * better performance. + */ + if (tg3_flag(tp, 40BIT_DMA_BUG) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl |= 0x8000; + else if (ccval == 0x6 || ccval == 0x7) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) + read_water = 4; + /* Set bit 23 to enable PCIX hw bug fix */ + tp->dma_rwctrl |= + (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | + (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | + (1 << 23); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { + /* 5780 always in PCIX mode */ + tp->dma_rwctrl |= 0x00144000; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + /* 5714 always in PCIX mode */ + tp->dma_rwctrl |= 0x00148000; + } else { + tp->dma_rwctrl |= 0x001b000f; + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl &= 0xfffffff0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + /* Remove this if it causes problems for some boards. */ + tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; + + /* On 5700/5701 chips, we need to set this bit. 
+ * Otherwise the chip will issue cacheline transactions + * to streamable DMA memory with not all the byte + * enables turned on. This is an error on several + * RISC PCI controllers, in particular sparc64. + * + * On 5703/5704 chips, this bit has been reassigned + * a different meaning. In particular, it is used + * on those chips to enable a PCI-X workaround. + */ + tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + #if 0 + /* Unneeded, already done by tg3_get_invariants. */ + tg3_switch_clocks(tp); + #endif + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + goto out; + + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + while (1) { + u32 *p = buf, i; + + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) + p[i] = i; + + /* Send the buffer to the chip. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); + if (ret) { + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); + break; + } + + #if 0 + /* validate data reached card RAM correctly. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + u32 val; + tg3_read_mem(tp, 0x2100 + (i*4), &val); + if (le32_to_cpu(val) != p[i]) { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on device! " + "(%d != %d)\n", __func__, val, i); + /* ret = -ENODEV here? */ + } + p[i] = 0; + } + #endif + /* Now read it back. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); + if (ret) { + dev_err(&tp->pdev->dev, "%s: Buffer read failed. " + "err = %d\n", __func__, ret); + break; + } + + /* Verify it. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + if (p[i] == i) + continue; + + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + break; + } else { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); + ret = -ENODEV; + goto out; + } + } + + if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { + /* Success. */ + ret = 0; + break; + } + } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } else { + /* Safe to use the calculated DMA boundary. 
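+ * Restore the value computed earlier by tg3_calc_dma_bndry().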
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + + out: + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); + out_nofree: + return ret; + } + + static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) + { + if (tg3_flag(tp, 57765_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_57765; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_57765; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_57765; + } else if (tg3_flag(tp, 5705_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5705; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5705; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5906; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5906; + } + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_5780; + } else { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO; + } + + tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; + tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; + } + + static char * __devinit tg3_phy_string(struct tg3 *tp) + { + switch (tp->phy_id & TG3_PHY_ID_MASK) { + case TG3_PHY_ID_BCM5400: return "5400"; + case TG3_PHY_ID_BCM5401: return "5401"; + case TG3_PHY_ID_BCM5411: return "5411"; + case TG3_PHY_ID_BCM5701: return "5701"; + case TG3_PHY_ID_BCM5703: return "5703"; + case TG3_PHY_ID_BCM5704: return "5704"; + case TG3_PHY_ID_BCM5705: return "5705"; + case TG3_PHY_ID_BCM5750: return "5750"; + case TG3_PHY_ID_BCM5752: return "5752"; + case TG3_PHY_ID_BCM5714: return "5714"; + case TG3_PHY_ID_BCM5780: return "5780"; + case TG3_PHY_ID_BCM5755: return "5755"; + case TG3_PHY_ID_BCM5787: return "5787"; + case TG3_PHY_ID_BCM5784: return "5784"; + case TG3_PHY_ID_BCM5756: return "5722/5756"; + case TG3_PHY_ID_BCM5906: return "5906"; + case TG3_PHY_ID_BCM5761: return "5761"; + case TG3_PHY_ID_BCM5718C: return "5718C"; + case TG3_PHY_ID_BCM5718S: return "5718S"; + case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM8002: return "8002/serdes"; + case 0: return "serdes"; + default: return "unknown"; + } + } + + static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) + { + if (tg3_flag(tp, PCI_EXPRESS)) { + strcpy(str, "PCI Express"); + return str; + } else if (tg3_flag(tp, PCIX_MODE)) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + 
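/* The low bits of TG3PCI_CLOCK_CTRL encode the sampled PCI-X bus speed. */ +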
strcpy(str, "PCIX:"); + + if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + } else { + strcpy(str, "PCI:"); + if (tg3_flag(tp, PCI_HIGH_SPEED)) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tg3_flag(tp, PCI_32BIT)) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; + } + + static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) + { + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; + + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr | func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. + */ + if (!peer) { + peer = tp->pdev; + return peer; + } + + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other + */ + pci_dev_put(peer); + + return peer; + } + + static void __devinit tg3_init_coal(struct tg3 *tp) + { + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } + + if (tg3_flag(tp, 5705_PLUS)) { + ec->rx_coalesce_usecs_irq = 0; + ec->tx_coalesce_usecs_irq = 0; + ec->stats_block_coalesce_usecs = 0; + } + } + + static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, + #endif + }; + + static int __devinit tg3_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct net_device *dev; + struct tg3 *tp; + int i, err, pm_cap; + u32 sndmbx, rcvmbx, intmbx; + char str[40]; + u64 dma_mask, persist_dma_mask; + u32 features = 0; + + printk_once(KERN_INFO "%s\n", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + 
pci_set_master(pdev); + + /* Find power-management capability. */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find Power Management capability, aborting\n"); + err = -EIO; + goto err_out_free_res; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); + goto err_out_free_res; + } + + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); + if (!dev) { + dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto err_out_power_down; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pdev = pdev; + tp->dev = dev; + tp->pm_cap = pm_cap; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; + else + tp->msg_enable = TG3_DEF_MSG_ENABLE; + + /* The word/byte swap controls here control register access byte + * swapping. DMA data byte swapping is controlled in the GRC_MODE + * setting below. + */ + tp->misc_host_ctrl = + MISC_HOST_CTRL_MASK_PCI_INT | + MISC_HOST_CTRL_WORD_SWAP | + MISC_HOST_CTRL_INDIR_ACCESS | + MISC_HOST_CTRL_PCISTATE_RW; + + /* The NONFRM (non-frame) byte/word swap controls take effect + * on descriptor entries, anything which isn't packet data. + * + * The StrongARM chips on the board (one for tx, one for rx) + * are running in big-endian mode. + */ + tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | + GRC_MODE_WSWAP_NONFRM_DATA); + #ifdef __BIG_ENDIAN + tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; + #endif + spin_lock_init(&tp->lock); + spin_lock_init(&tp->indirect_lock); + INIT_WORK(&tp->reset_task, tg3_reset_task); + + tp->regs = pci_ioremap_bar(pdev, BAR_0); + if (!tp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } + + tp->rx_pending = TG3_DEF_RX_RING_PENDING; + tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + + dev->ethtool_ops = &tg3_ethtool_ops; + dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; + dev->irq = pdev->irq; + + err = tg3_get_invariants(tp); + if (err) { + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_apeunmap; + } + + /* The EPB bridge inside 5714, 5715, and 5780 and any + * device behind the EPB cannot support DMA addresses > 40-bit. + * On 64-bit systems with IOMMU, use 40-bit dma_mask. + * On 64-bit systems without IOMMU, use 64-bit dma_mask and + * do DMA address check in tg3_start_xmit(). + */ + if (tg3_flag(tp, IS_5788)) + persist_dma_mask = dma_mask = DMA_BIT_MASK(32); + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); + #ifdef CONFIG_HIGHMEM + dma_mask = DMA_BIT_MASK(64); + #endif + } else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. 
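+ * Try the 64-bit (or 40-bit) DMA mask first, falling back to 32-bit below.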
*/ + if (dma_mask > DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, + persist_dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "Unable to obtain 64 bit " + "DMA for consistent allocations\n"); + goto err_out_apeunmap; + } + } + } + if (err || dma_mask == DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_out_apeunmap; + } + } + + tg3_init_bufmgr_config(tp); + + features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + /* 5700 B0 chips do not support checksumming correctly due + * to hardware bugs. + */ + if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { + features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_IPV6_CSUM; + } + + /* TSO is on by default on chips that support hardware TSO. + * Firmware TSO on older chips gives lower performance, so it + * is off by default, but can be enabled using ethtool. + */ + if ((tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) && + (features & NETIF_F_IP_CSUM)) + features |= NETIF_F_TSO; + if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { + if (features & NETIF_F_IPV6_CSUM) + features |= NETIF_F_TSO6; + if (tg3_flag(tp, HW_TSO_3) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + features |= NETIF_F_TSO_ECN; + } + + dev->features |= features; + dev->vlan_features |= features; + + /* + * Add loopback capability only for a subset of devices that support + * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY + * loopback for the remaining devices. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) + /* Add the loopback capability */ + features |= NETIF_F_LOOPBACK; + + dev->hw_features |= features; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && + !tg3_flag(tp, TSO_CAPABLE) && + !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { + tg3_flag_set(tp, MAX_RXPEND_64); + tp->rx_pending = 63; + } + + err = tg3_get_device_address(tp); + if (err) { + dev_err(&pdev->dev, + "Could not obtain valid ethernet address, aborting\n"); + goto err_out_apeunmap; + } + + /* + * Reset chip in case UNDI or EFI driver did not shut down DMA. + * The DMA self test will enable WDMAC and we'll see (spurious) + * pending DMA on the PCI bus at that point. 
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i <= 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + tg3_init_coal(tp); + + pci_set_drvdata(pdev, dev); + + if (tg3_flag(tp, 5717_PLUS)) { + /* Resume a low-power mode */ + tg3_frob_aux_power(tp, false); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_apeunmap; + } + + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", + tp->board_part_number, + tp->pci_chip_rev_id, + tg3_bus_string(tp, str), + dev->dev_addr); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + struct phy_device *phydev; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } + + netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); + netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", + tp->dma_rwctrl, + pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : + ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); + + pci_save_state(pdev); + + return 0; + + err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + + err_out_iounmap: + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + + err_out_free_dev: + free_netdev(dev); + + err_out_power_down: + pci_set_power_state(pdev, PCI_D3hot); + + err_out_free_res: + pci_release_regions(pdev); + + err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; + } + + static void __devexit tg3_remove_one(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct tg3 *tp = netdev_priv(dev); + + if (tp->fw) + release_firmware(tp->fw); + + cancel_work_sync(&tp->reset_task); + - if (!tg3_flag(tp, USE_PHYLIB)) { ++ if (tg3_flag(tp, USE_PHYLIB)) { + tg3_phy_fini(tp); + tg3_mdio_fini(tp); + } + + unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } + } + + #ifdef CONFIG_PM_SLEEP + static int tg3_suspend(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + flush_work_sync(&tp->reset_task); + tg3_phy_stop(tp); + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + + tg3_full_lock(tp, 1); + tg3_disable_ints(tp); + tg3_full_unlock(tp); + + netif_device_detach(dev); + + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_flag_clear(tp, INIT_COMPLETE); + tg3_full_unlock(tp); + + err = tg3_power_down_prepare(tp); + if (err) { + int err2; + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (err2) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + netif_device_attach(dev); + tg3_netif_start(tp); + + out: + tg3_full_unlock(tp); + + if (!err2) + tg3_phy_start(tp); + } + + return err; + } + + static int tg3_resume(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + netif_device_attach(dev); + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + if (err) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; + } + + static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); + #define TG3_PM_OPS (&tg3_pm_ops) + + #else + + #define TG3_PM_OPS NULL + + #endif /* CONFIG_PM_SLEEP */ + + /** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
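+ * + * Returns PCI_ERS_RESULT_NEED_RESET, or PCI_ERS_RESULT_DISCONNECT when the + * channel is permanently dead.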
+ */ + static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + tg3_flag_clear(tp, RESTART_TIMER); + + /* Want to make sure that the reset task doesn't run */ + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + tg3_flag_clear(tp, RESTART_TIMER); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + + done: + if (state == pci_channel_io_perm_failure) + err = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return err; + } + + /** + * tg3_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has experienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ + static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) + goto done; + + rc = PCI_ERS_RESULT_RECOVERED; + + done: + rtnl_unlock(); + + return rc; + } + + /** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that it's OK to resume normal operation. 
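+ * The hardware is reinitialized and the driver timer restarted under + * rtnl_lock before traffic is allowed to flow again.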
+ */ + static void tg3_io_resume(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + + done: + rtnl_unlock(); + } + + static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume + }; + + static struct pci_driver tg3_driver = { + .name = DRV_MODULE_NAME, + .id_table = tg3_pci_tbl, + .probe = tg3_init_one, + .remove = __devexit_p(tg3_remove_one), + .err_handler = &tg3_err_handler, + .driver.pm = TG3_PM_OPS, + }; + + static int __init tg3_init(void) + { + return pci_register_driver(&tg3_driver); + } + + static void __exit tg3_cleanup(void) + { + pci_unregister_driver(&tg3_driver); + } + + module_init(tg3_init); + module_exit(tg3_cleanup); diff --combined drivers/net/ethernet/cadence/at91_ether.c index 0000000,1b0ba8c..56624d3 mode 000000,100644..100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@@ -1,0 -1,1254 +1,1254 @@@ + /* + * Ethernet driver for the Atmel AT91RM9200 (Thunder) + * + * Copyright (C) 2003 SAN People (Pty) Ltd + * + * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. + * Initial version by Rick Bronson 01/11/2003 + * + * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker + * (Polaroid Corporation) + * + * Realtek RTL8201(B)L PHY support by Roman Avramenko roman@imsystems.ru + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + + #include <linux/module.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/mii.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/skbuff.h> + #include <linux/dma-mapping.h> + #include <linux/ethtool.h> + #include <linux/platform_device.h> + #include <linux/clk.h> + #include <linux/gfp.h> + + #include <asm/io.h> + #include <asm/uaccess.h> + #include <asm/mach-types.h> + + #include <mach/at91rm9200_emac.h> -#include <mach/gpio.h> ++#include <asm/gpio.h> + #include <mach/board.h> + + #include "at91_ether.h" + + #define DRV_NAME "at91_ether" + #define DRV_VERSION "1.0" + + #define LINK_POLL_INTERVAL (HZ) + + /* ..................................................................... */ + + /* + * Read from a EMAC register. + */ + static inline unsigned long at91_emac_read(unsigned int reg) + { + void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; + + return __raw_readl(emac_base + reg); + } + + /* + * Write to a EMAC register. + */ + static inline void at91_emac_write(unsigned int reg, unsigned long value) + { + void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; + + __raw_writel(value, emac_base + reg); + } + + /* ........................... PHY INTERFACE ........................... 
*/ + + /* + * Enable the MDIO bit in MAC control register + * When not called from an interrupt-handler, access to the PHY must be + * protected by a spinlock. + */ + static void enable_mdi(void) + { + unsigned long ctl; + + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ + } + + /* + * Disable the MDIO bit in the MAC control register + */ + static void disable_mdi(void) + { + unsigned long ctl; + + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ + } + + /* + * Wait until the PHY operation is complete. + */ + static inline void at91_phy_wait(void) { + unsigned long timeout = jiffies + 2; + + while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { + if (time_after(jiffies, timeout)) { + printk("at91_ether: MDIO timeout\n"); + break; + } + cpu_relax(); + } + } + + /* + * Write value to a PHY register + * Note: MDI interface is assumed to already have been enabled. + */ + static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value) + { + at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W + | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA)); + + /* Wait until the IDLE bit in the Network Status register is set */ + at91_phy_wait(); + } + + /* + * Read value stored in a PHY register. + * Note: MDI interface is assumed to already have been enabled. + */ + static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value) + { + at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R + | ((phy_addr & 0x1f) << 23) | (address << 18)); + + /* Wait until the IDLE bit in the Network Status register is set */ + at91_phy_wait(); + + *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA; + } + + /* ........................... PHY MANAGEMENT .......................... */ + + /* + * Access the PHY to determine the current link speed and mode, and update the + * MAC accordingly. + * If there is no link, or if auto-negotiation is still in progress, then no + * changes are made. + */ + static void update_linkspeed(struct net_device *dev, int silent) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int bmsr, bmcr, lpa, mac_cfg; + unsigned int speed, duplex; + + if (!mii_link_ok(&lp->mii)) { /* no link */ + netif_carrier_off(dev); + if (!silent) + printk(KERN_INFO "%s: Link down.\n", dev->name); + return; + } + + /* Link up, or auto-negotiation still in progress */ + read_phy(lp->phy_address, MII_BMSR, &bmsr); + read_phy(lp->phy_address, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */ + if (!(bmsr & BMSR_ANEGCOMPLETE)) + return; /* Do nothing - another interrupt generated when negotiation complete */ + + read_phy(lp->phy_address, MII_LPA, &lpa); + if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; + else speed = SPEED_10; + if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; + else duplex = DUPLEX_HALF; + } else { + speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + } + + /* Update the MAC */ + mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); + if (speed == SPEED_100) { + if (duplex == DUPLEX_FULL) /* 100 Full Duplex */ + mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD; + else /* 100 Half Duplex */ + mac_cfg |= AT91_EMAC_SPD; + } else { + if (duplex == DUPLEX_FULL) /* 10 Full Duplex */ + mac_cfg |= AT91_EMAC_FD; + else {} /* 10 Half Duplex */ + } + at91_emac_write(AT91_EMAC_CFG, mac_cfg); + + if (!silent) + printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); + netif_carrier_on(dev); + } + + /* + * Handle interrupts from the PHY + */ + static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + unsigned int phy; + + /* + * This handler is triggered on both edges, but the PHY chips expect + * level-triggering. We therefore have to check if the PHY actually has + * an IRQ pending. + */ + enable_mdi(); + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { + read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ + if (!(phy & (1 << 0))) + goto done; + } + else if (lp->phy_type == MII_LXT971A_ID) { + read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ + if (!(phy & (1 << 2))) + goto done; + } + else if (lp->phy_type == MII_BCM5221_ID) { + read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ + if (!(phy & (1 << 0))) + goto done; + } + else if (lp->phy_type == MII_KS8721_ID) { + read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ + if (!(phy & ((1 << 2) | 1))) + goto done; + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy); + if (!(phy & ((1 << 2) | 1))) + goto done; + } + else if (lp->phy_type == MII_DP83848_ID) { + read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ + if (!(phy & (1 << 7))) + goto done; + } + + update_linkspeed(dev, 0); + + done: + disable_mdi(); + + return IRQ_HANDLED; + } + + /* + * Initialize and enable the PHY interrupt for link-state changes + */ + static void enable_phyirq(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int dsintr, irq_number; + int status; + + irq_number = lp->board_data.phy_irq_pin; + if (!irq_number) { + /* + * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L), + * or board does not have it connected. 
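+ * Fall back to polling the link state once per second in that case.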
+ */ + mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); + return; + } + + status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev); + if (status) { + printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status); + return; + } + + spin_lock_irq(&lp->lock); + enable_mdi(); + + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ + read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + dsintr = dsintr & ~0xf00; /* clear bits 8..11 */ + write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + } + else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ + read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */ + write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + } + else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ + dsintr = (1 << 15) | ( 1 << 14); + write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + } + else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ + dsintr = (1 << 10) | ( 1 << 8); + write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + dsintr = dsintr | 0x500; /* set bits 8, 10 */ + write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + } + else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ + read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + dsintr = dsintr | 0x3c; /* set bits 2..5 */ + write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); + read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + dsintr = dsintr | 0x3; /* set bits 0,1 */ + write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); + } + + disable_mdi(); + spin_unlock_irq(&lp->lock); + } + + /* + * Disable the PHY interrupt + */ + static void disable_phyirq(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int dsintr; + unsigned int irq_number; + + irq_number = lp->board_data.phy_irq_pin; + if (!irq_number) { + del_timer_sync(&lp->check_timer); + return; + } + + spin_lock_irq(&lp->lock); + enable_mdi(); + + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ + read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + dsintr = dsintr | 0xf00; /* set bits 8..11 */ + write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + } + else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ + read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */ + write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + } + else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ + read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr); + dsintr = ~(1 << 14); + write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + } + else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ + read_phy(lp->phy_address, MII_TPISTATUS, &dsintr); + dsintr = ~((1 << 10) | (1 << 8)); + write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + dsintr = dsintr & ~0x500; /* clear bits 8, 10 */ + write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + } + else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ + read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + dsintr = dsintr & ~0x3; /* clear bits 0, 1 */ + write_phy(lp->phy_address, 
MII_DPMICR_REG, dsintr); + read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + dsintr = dsintr & ~0x3c; /* clear bits 2..5 */ + write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); + } + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + free_irq(irq_number, dev); /* Free interrupt handler */ + } + + /* + * Perform a software reset of the PHY. + */ + #if 0 + static void reset_phy(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int bmcr; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + /* Perform PHY reset */ + write_phy(lp->phy_address, MII_BMCR, BMCR_RESET); + + /* Wait until PHY reset is complete */ + do { + read_phy(lp->phy_address, MII_BMCR, &bmcr); + } while (!(bmcr & BMCR_RESET)); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + } + #endif + + static void at91ether_check_link(unsigned long dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + + enable_mdi(); + update_linkspeed(dev, 1); + disable_mdi(); + + mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); + } + + /* ......................... ADDRESS MANAGEMENT ........................ */ + + /* + * NOTE: Your bootloader must always set the MAC address correctly before + * booting into Linux. + * + * - It must always set the MAC address after reset, even if it doesn't + * happen to access the Ethernet while it's booting. Some versions of + * U-Boot on the AT91RM9200-DK do not do this. + * + * - Likewise it must store the addresses in the correct byte order. + * MicroMonitor (uMon) on the CSB337 does this incorrectly (and + * continues to do so, for bug-compatibility). + */ + + static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo) + { + char addr[6]; + + if (machine_is_csb337()) { + addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */ + addr[4] = (lo & 0xff00) >> 8; + addr[3] = (lo & 0xff0000) >> 16; + addr[2] = (lo & 0xff000000) >> 24; + addr[1] = (hi & 0xff); + addr[0] = (hi & 0xff00) >> 8; + } + else { + addr[0] = (lo & 0xff); + addr[1] = (lo & 0xff00) >> 8; + addr[2] = (lo & 0xff0000) >> 16; + addr[3] = (lo & 0xff000000) >> 24; + addr[4] = (hi & 0xff); + addr[5] = (hi & 0xff00) >> 8; + } + + if (is_valid_ether_addr(addr)) { + memcpy(dev->dev_addr, &addr, 6); + return 1; + } + return 0; + } + + /* + * Set the ethernet MAC address in dev->dev_addr + */ + static void __init get_mac_address(struct net_device *dev) + { + /* Check Specific-Address 1 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L))) + return; + /* Check Specific-Address 2 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L))) + return; + /* Check Specific-Address 3 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L))) + return; + /* Check Specific-Address 4 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L))) + return; + + printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n"); + } + + /* + * Program the hardware MAC address from dev->dev_addr. 
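+ * Specific-Address 1 is loaded with the address and Specific-Address 2 is + * cleared.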
+ */ + static void update_mac_address(struct net_device *dev) + { + at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); + at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4])); + + at91_emac_write(AT91_EMAC_SA2L, 0); + at91_emac_write(AT91_EMAC_SA2H, 0); + } + + /* + * Store the new hardware address in dev->dev_addr, and update the MAC. + */ + static int set_mac_address(struct net_device *dev, void* addr) + { + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + update_mac_address(dev); + + printk("%s: Setting MAC address to %pM\n", dev->name, + dev->dev_addr); + + return 0; + } + + static int inline hash_bit_value(int bitnr, __u8 *addr) + { + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; + } + + /* + * The hash address register is 64 bits long and takes up two locations in the memory map. + * The least significant bits are stored in EMAC_HSL and the most significant + * bits in EMAC_HSH. + * + * The unicast hash enable and the multicast hash enable bits in the network configuration + * register enable the reception of hash matched frames. The destination address is + * reduced to a 6 bit index into the 64 bit hash register using the following hash function. + * The hash function is an exclusive or of every sixth bit of the destination address. + * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] + * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] + * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] + * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] + * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] + * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] + * da[0] represents the least significant bit of the first byte received, that is, the multicast/ + * unicast indicator, and da[47] represents the most significant bit of the last byte + * received. + * If the hash index points to a bit that is set in the hash register then the frame will be + * matched according to whether the frame is multicast or unicast. + * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and + * the hash index points to a bit set in the hash register. + * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the + * hash index points to a bit set in the hash register. + * To receive all multicast frames, the hash register should be set with all ones and the + * multicast hash enable bit should be set in the network configuration register. + */ + + /* + * Return the hash index value for the specified address. + */ + static int hash_get_index(__u8 *addr) + { + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i*6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; + } + + /* + * Add multicast addresses to the internal multicast-hash table. 
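+ * Each address is folded to a 6-bit index by hash_get_index() above and the + * matching bit is set in the 64-bit EMAC_HSH/EMAC_HSL register pair.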
+ */ + static void at91ether_sethashtable(struct net_device *dev) + { + struct netdev_hw_addr *ha; + unsigned long mc_filter[2]; + unsigned int bitnr; + + mc_filter[0] = mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + at91_emac_write(AT91_EMAC_HSL, mc_filter[0]); + at91_emac_write(AT91_EMAC_HSH, mc_filter[1]); + } + + /* + * Enable/Disable promiscuous and multicast modes. + */ + static void at91ether_set_multicast_list(struct net_device *dev) + { + unsigned long cfg; + + cfg = at91_emac_read(AT91_EMAC_CFG); + + if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ + cfg |= AT91_EMAC_CAF; + else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */ + cfg &= ~AT91_EMAC_CAF; + + if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ + at91_emac_write(AT91_EMAC_HSH, -1); + at91_emac_write(AT91_EMAC_HSL, -1); + cfg |= AT91_EMAC_MTI; + } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ + at91ether_sethashtable(dev); + cfg |= AT91_EMAC_MTI; + } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ + at91_emac_write(AT91_EMAC_HSH, 0); + at91_emac_write(AT91_EMAC_HSL, 0); + cfg &= ~AT91_EMAC_MTI; + } + + at91_emac_write(AT91_EMAC_CFG, cfg); + } + + /* ......................... ETHTOOL SUPPORT ........................... */ + + static int mdio_read(struct net_device *dev, int phy_id, int location) + { + unsigned int value; + + read_phy(phy_id, location, &value); + return value; + } + + static void mdio_write(struct net_device *dev, int phy_id, int location, int value) + { + write_phy(phy_id, location, value); + } + + static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_ethtool_gset(&lp->mii, cmd); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ + cmd->supported = SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + return ret; + } + + static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_ethtool_sset(&lp->mii, cmd); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return ret; + } + + static int at91ether_nwayreset(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_nway_restart(&lp->mii); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return ret; + } + + static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); + } + + static const struct ethtool_ops at91ether_ethtool_ops = { + .get_settings = at91ether_get_settings, + .set_settings = at91ether_set_settings, + .get_drvinfo = at91ether_get_drvinfo, + .nway_reset = at91ether_nwayreset, + .get_link = ethtool_op_get_link, + }; + + static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { + struct at91_private *lp = netdev_priv(dev); + int res; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&lp->lock); + enable_mdi(); + res = 
generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return res; + } + + /* ................................ MAC ................................ */ + + /* + * Initialize and start the Receiver and Transmit subsystems + */ + static void at91ether_start(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + struct recv_desc_bufs *dlist, *dlist_phys; + int i; + unsigned long ctl; + + dlist = lp->dlist; + dlist_phys = lp->dlist_phys; + + for (i = 0; i < MAX_RX_DESCR; i++) { + dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0]; + dlist->descriptors[i].size = 0; + } + + /* Set the Wrap bit on the last descriptor */ + dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP; + + /* Reset buffer index */ + lp->rxBuffIndex = 0; + + /* Program address of descriptor list in Rx Buffer Queue register */ + at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys); + + /* Enable Receive and Transmit */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); + } + + /* + * Open the ethernet interface + */ + static int at91ether_open(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned long ctl; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EADDRNOTAVAIL; + + clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */ + + /* Clear internal statistics */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); + + /* Update the MAC address (incase user has changed it) */ + update_mac_address(dev); + + /* Enable PHY interrupt */ + enable_phyirq(dev); + + /* Enable MAC interrupts */ + at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA + | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM + | AT91_EMAC_ROVR | AT91_EMAC_ABT); + + /* Determine current link speed */ + spin_lock_irq(&lp->lock); + enable_mdi(); + update_linkspeed(dev, 0); + disable_mdi(); + spin_unlock_irq(&lp->lock); + + at91ether_start(dev); + netif_start_queue(dev); + return 0; + } + + /* + * Close the interface + */ + static int at91ether_close(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned long ctl; + + /* Disable Receiver and Transmitter */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); + + /* Disable PHY interrupt */ + disable_phyirq(dev); + + /* Disable MAC interrupts */ + at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA + | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM + | AT91_EMAC_ROVR | AT91_EMAC_ABT); + + netif_stop_queue(dev); + + clk_disable(lp->ether_clk); /* Disable Peripheral clock */ + + return 0; + } + + /* + * Transmit packet. 
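+ * The EMAC has a single transmit buffer, so at most one skb is in flight:
+ * we only submit when TSR.BNQ reports the buffer free, stop the queue for
+ * the duration of the transmit, and wake it again from the TCOM interrupt.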
+ */
+ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct at91_private *lp = netdev_priv(dev);
+
+ if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
+ netif_stop_queue(dev);
+
+ /* Store packet information (to free when Tx completed) */
+ lp->skb = skb;
+ lp->skb_length = skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
+ dev->stats.tx_bytes += skb->len;
+
+ /* Set address of the data in the Transmit Address register */
+ at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
+ /* Set length of the packet in the Transmit Control register */
+ at91_emac_write(AT91_EMAC_TCR, skb->len);
+
+ } else {
+ printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
+ return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
+ on this skb; it also reports -ENETDOWN and printk's, so either
+ we free and return NETDEV_TX_OK, or don't free and return NETDEV_TX_BUSY */
+ }
+
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * Update the current statistics from the internal statistics registers.
+ */
+ static struct net_device_stats *at91ether_stats(struct net_device *dev)
+ {
+ int ale, lenerr, seqe, lcol, ecol;
+
+ if (netif_running(dev)) {
+ dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
+ ale = at91_emac_read(AT91_EMAC_ALE);
+ dev->stats.rx_frame_errors += ale; /* Alignment errors */
+ lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
+ dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
+ seqe = at91_emac_read(AT91_EMAC_SEQE);
+ dev->stats.rx_crc_errors += seqe; /* CRC error */
+ dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
+ dev->stats.rx_errors += (ale + lenerr + seqe +
+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
+
+ dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
+ dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
+ dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
+ dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE); /* Heartbeat error */
+
+ lcol = at91_emac_read(AT91_EMAC_LCOL);
+ ecol = at91_emac_read(AT91_EMAC_ECOL);
+ dev->stats.tx_window_errors += lcol; /* Late collisions */
+ dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
+
+ dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
+ }
+ return &dev->stats;
+ }
+
+ /*
+ * Extract received frame from buffer descriptors and send it to upper layers.
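+ * The two bytes reserved in front of each frame keep the IP header
+ * word-aligned behind the 14-byte Ethernet header (cf. NET_IP_ALIGN).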
+ * (Called from interrupt context) + */ + static void at91ether_rx(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + struct recv_desc_bufs *dlist; + unsigned char *p_recv; + struct sk_buff *skb; + unsigned int pktlen; + + dlist = lp->dlist; + while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) { + p_recv = dlist->recv_buf[lp->rxBuffIndex]; + pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */ + skb = dev_alloc_skb(pktlen + 2); + if (skb != NULL) { + skb_reserve(skb, 2); + memcpy(skb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_bytes += pktlen; + netif_rx(skb); + } + else { + dev->stats.rx_dropped += 1; + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); + } + + if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) + dev->stats.multicast++; + + dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ + if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ + lp->rxBuffIndex = 0; + else + lp->rxBuffIndex++; + } + } + + /* + * MAC interrupt handler + */ + static irqreturn_t at91ether_interrupt(int irq, void *dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + unsigned long intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + It is automatically cleared once read. */ + intstatus = at91_emac_read(AT91_EMAC_ISR); + + if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ + at91ether_rx(dev); + + if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ + /* The TCOM bit is set even if the transmission failed. */ + if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) + dev->stats.tx_errors += 1; + + if (lp->skb) { + dev_kfree_skb_irq(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); + } + netif_wake_queue(dev); + } + + /* Work-around for Errata #11 */ + if (intstatus & AT91_EMAC_RBNA) { + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE); + } + + if (intstatus & AT91_EMAC_ROVR) + printk("%s: ROVR error\n", dev->name); + + return IRQ_HANDLED; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + static void at91ether_poll_controller(struct net_device *dev) + { + unsigned long flags; + + local_irq_save(flags); + at91ether_interrupt(dev->irq, dev); + local_irq_restore(flags); + } + #endif + + static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = at91ether_stats, + .ndo_set_rx_mode = at91ether_set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_do_ioctl = at91ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, + #endif + }; + + /* + * Initialize the ethernet interface + */ + static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, + struct platform_device *pdev, struct clk *ether_clk) + { + struct at91_eth_data *board_data = pdev->dev.platform_data; + struct net_device *dev; + struct at91_private *lp; + unsigned int val; + int res; + + dev = alloc_etherdev(sizeof(struct at91_private)); + if (!dev) + return -ENOMEM; + + dev->base_addr = AT91_VA_BASE_EMAC; + 
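+ /* The RM9200 EMAC sits at a fixed virtual base address and system IRQ, so neither is looked up from platform resources */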
dev->irq = AT91RM9200_ID_EMAC; + + /* Install the interrupt handler */ + if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { + free_netdev(dev); + return -EBUSY; + } + + /* Allocate memory for DMA Receive descriptors */ + lp = netdev_priv(dev); + lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL); + if (lp->dlist == NULL) { + free_irq(dev->irq, dev); + free_netdev(dev); + return -ENOMEM; + } + lp->board_data = *board_data; + lp->ether_clk = ether_clk; + platform_set_drvdata(pdev, dev); + + spin_lock_init(&lp->lock); + + ether_setup(dev); + dev->netdev_ops = &at91ether_netdev_ops; + dev->ethtool_ops = &at91ether_ethtool_ops; + + SET_NETDEV_DEV(dev, &pdev->dev); + + get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ + update_mac_address(dev); /* Program ethernet address into MAC */ + + at91_emac_write(AT91_EMAC_CTL, 0); + + if (lp->board_data.is_rmii) + at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); + else + at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); + + /* Perform PHY-specific initialization */ + spin_lock_irq(&lp->lock); + enable_mdi(); + if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { + read_phy(phy_address, MII_DSCR_REG, &val); + if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ + lp->phy_media = PORT_FIBRE; + } else if (machine_is_csb337()) { + /* mix link activity status into LED2 link state */ + write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22); + } else if (machine_is_ecbat91()) + write_phy(phy_address, MII_LEDCTRL_REG, 0x156A); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + lp->mii.dev = dev; /* Support for ethtool */ + lp->mii.mdio_read = mdio_read; + lp->mii.mdio_write = mdio_write; + lp->mii.phy_id = phy_address; + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + + lp->phy_type = phy_type; /* Type of PHY connected */ + lp->phy_address = phy_address; /* MDI address of PHY */ + + /* Register the network interface */ + res = register_netdev(dev); + if (res) { + free_irq(dev->irq, dev); + free_netdev(dev); + dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); + return res; + } + + /* Determine current link speed */ + spin_lock_irq(&lp->lock); + enable_mdi(); + update_linkspeed(dev, 0); + disable_mdi(); + spin_unlock_irq(&lp->lock); + netif_carrier_off(dev); /* will be enabled in open() */ + + /* If board has no PHY IRQ, use a timer to poll the PHY */ + if (!lp->board_data.phy_irq_pin) { + init_timer(&lp->check_timer); + lp->check_timer.data = (unsigned long)dev; + lp->check_timer.function = at91ether_check_link; + } else if (lp->board_data.phy_irq_pin >= 32) + gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy"); + + /* Display ethernet banner */ + printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", + dev->name, (uint) dev->base_addr, dev->irq, + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", + dev->dev_addr); + if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) + printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? 
"(Fiber)" : "(Copper)"); + else if (phy_type == MII_LXT971A_ID) + printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name); + else if (phy_type == MII_RTL8201_ID) + printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name); + else if (phy_type == MII_BCM5221_ID) + printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name); + else if (phy_type == MII_DP83847_ID) + printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name); + else if (phy_type == MII_DP83848_ID) + printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name); + else if (phy_type == MII_AC101L_ID) + printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name); + else if (phy_type == MII_KS8721_ID) + printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name); + else if (phy_type == MII_T78Q21x3_ID) + printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name); + else if (phy_type == MII_LAN83C185_ID) + printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name); + + return 0; + } + + /* + * Detect MAC and PHY and perform initialization + */ + static int __init at91ether_probe(struct platform_device *pdev) + { + unsigned int phyid1, phyid2; + int detected = -1; + unsigned long phy_id; + unsigned short phy_address = 0; + struct clk *ether_clk; + + ether_clk = clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(ether_clk)) { + printk(KERN_ERR "at91_ether: no clock defined\n"); + return -ENODEV; + } + clk_enable(ether_clk); /* Enable Peripheral clock */ + + while ((detected != 0) && (phy_address < 32)) { + /* Read the PHY ID registers */ + enable_mdi(); + read_phy(phy_address, MII_PHYSID1, &phyid1); + read_phy(phy_address, MII_PHYSID2, &phyid2); + disable_mdi(); + + phy_id = (phyid1 << 16) | (phyid2 & 0xfff0); + switch (phy_id) { + case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */ + case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */ + case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */ + case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */ + case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */ + case MII_DP83847_ID: /* National Semiconductor DP83847: */ + case MII_DP83848_ID: /* National Semiconductor DP83848: */ + case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */ + case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */ + case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */ + case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */ + detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk); + break; + } + + phy_address++; + } + + clk_disable(ether_clk); /* Disable Peripheral clock */ + + return detected; + } + + static int __devexit at91ether_remove(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(dev); + + if (lp->board_data.phy_irq_pin >= 32) + gpio_free(lp->board_data.phy_irq_pin); + + unregister_netdev(dev); + free_irq(dev->irq, dev); + dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); + clk_put(lp->ether_clk); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; + } + + #ifdef CONFIG_PM + + static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) + { + struct net_device *net_dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(net_dev); + int phy_irq = lp->board_data.phy_irq_pin; + + if (netif_running(net_dev)) { + if (phy_irq) + 
disable_irq(phy_irq); + + netif_stop_queue(net_dev); + netif_device_detach(net_dev); + + clk_disable(lp->ether_clk); + } + return 0; + } + + static int at91ether_resume(struct platform_device *pdev) + { + struct net_device *net_dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(net_dev); + int phy_irq = lp->board_data.phy_irq_pin; + + if (netif_running(net_dev)) { + clk_enable(lp->ether_clk); + + netif_device_attach(net_dev); + netif_start_queue(net_dev); + + if (phy_irq) + enable_irq(phy_irq); + } + return 0; + } + + #else + #define at91ether_suspend NULL + #define at91ether_resume NULL + #endif + + static struct platform_driver at91ether_driver = { + .remove = __devexit_p(at91ether_remove), + .suspend = at91ether_suspend, + .resume = at91ether_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + }; + + static int __init at91ether_init(void) + { + return platform_driver_probe(&at91ether_driver, at91ether_probe); + } + + static void __exit at91ether_exit(void) + { + platform_driver_unregister(&at91ether_driver); + } + + module_init(at91ether_init) + module_exit(at91ether_exit) + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); + MODULE_AUTHOR("Andrew Victor"); + MODULE_ALIAS("platform:" DRV_NAME); diff --combined drivers/net/ethernet/jme.c index 0000000,7a0c746f..7becff1 mode 000000,100644..100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@@ -1,0 -1,3236 +1,3242 @@@ + /* + * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver + * + * Copyright 2008 JMicron Technology Corporation + * http://www.jmicron.com/ + * Copyright (c) 2009 - 2010 Guo-Fu Tseng cooldavid@cooldavid.org + * + * Author: Guo-Fu Tseng cooldavid@cooldavid.org + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/crc32.h> + #include <linux/delay.h> + #include <linux/spinlock.h> + #include <linux/in.h> + #include <linux/ip.h> + #include <linux/ipv6.h> + #include <linux/tcp.h> + #include <linux/udp.h> + #include <linux/if_vlan.h> + #include <linux/slab.h> + #include <net/ip6_checksum.h> + #include "jme.h" + + static int force_pseudohp = -1; + static int no_pseudohp = -1; + static int no_extplug = -1; + module_param(force_pseudohp, int, 0); + MODULE_PARM_DESC(force_pseudohp, + "Enable pseudo hot-plug feature manually by driver instead of BIOS."); + module_param(no_pseudohp, int, 0); + MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); + module_param(no_extplug, int, 0); + MODULE_PARM_DESC(no_extplug, + "Do not use external plug signal for pseudo hot-plug."); + + static int + jme_mdio_read(struct net_device *netdev, int phy, int reg) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, val, again = (reg == MII_BMSR) ? 1 : 0; + + read_again: + jwrite32(jme, JME_SMI, SMI_OP_REQ | + smi_phy_addr(phy) | + smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + val = jread32(jme, JME_SMI); + if ((val & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) { + pr_err("phy(%d) read timeout : %d\n", phy, reg); + return 0; + } + + if (again--) + goto read_again; + + return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; + } + + static void + jme_mdio_write(struct net_device *netdev, + int phy, int reg, int val) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i; + + jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | + ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | + smi_phy_addr(phy) | smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) + pr_err("phy(%d) write timeout : %d\n", phy, reg); + } + + static inline void + jme_reset_phy_processor(struct jme_adapter *jme) + { + u32 val; + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + val = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_BMCR, val | BMCR_RESET); + } + + static void + jme_setup_wakeup_frame(struct jme_adapter *jme, + const u32 *mask, u32 crc, int fnr) + { + int i; + + /* + * Setup CRC pattern + */ + jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, crc); + wmb(); + + /* + * Setup Mask + */ + for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { + jwrite32(jme, JME_WFOI, + ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | + (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, mask[i]); + wmb(); + } + } + + static inline void + jme_mac_rxclk_off(struct jme_adapter *jme) + { + jme->reg_gpreg1 |= GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + jme_mac_rxclk_on(struct jme_adapter *jme) + { + jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + 
jme_mac_txclk_off(struct jme_adapter *jme) + { + jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_mac_txclk_on(struct jme_adapter *jme) + { + u32 speed = jme->reg_ghc & GHC_SPEED; + if (speed == GHC_SPEED_1000M) + jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + else + jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_ghc_speed(struct jme_adapter *jme) + { + jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_250A2_workaround(struct jme_adapter *jme) + { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + jme_assert_ghc_reset(struct jme_adapter *jme) + { + jme->reg_ghc |= GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_clear_ghc_reset(struct jme_adapter *jme) + { + jme->reg_ghc &= ~GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_mac_processor(struct jme_adapter *jme) + { + static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; + u32 crc = 0xCDCDCDCD; + u32 gpreg0; + int i; + + jme_reset_ghc_speed(jme); + jme_reset_250A2_workaround(jme); + + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_assert_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + udelay(1); + jme_clear_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + + jwrite32(jme, JME_RXDBA_LO, 0x00000000); + jwrite32(jme, JME_RXDBA_HI, 0x00000000); + jwrite32(jme, JME_RXQDC, 0x00000000); + jwrite32(jme, JME_RXNDA, 0x00000000); + jwrite32(jme, JME_TXDBA_LO, 0x00000000); + jwrite32(jme, JME_TXDBA_HI, 0x00000000); + jwrite32(jme, JME_TXQDC, 0x00000000); + jwrite32(jme, JME_TXNDA, 0x00000000); + + jwrite32(jme, JME_RXMCHT_LO, 0x00000000); + jwrite32(jme, JME_RXMCHT_HI, 0x00000000); + for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) + jme_setup_wakeup_frame(jme, mask, crc, i); + if (jme->fpgaver) + gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; + else + gpreg0 = GPREG0_DEFAULT; + jwrite32(jme, JME_GPREG0, gpreg0); + } + + static inline void + jme_clear_pm(struct jme_adapter *jme) + { + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); + } + + static int + jme_reload_eeprom(struct jme_adapter *jme) + { + u32 val; + int i; + + val = jread32(jme, JME_SMBCSR); + + if (val & SMBCSR_EEPROMD) { + val |= SMBCSR_CNACK; + jwrite32(jme, JME_SMBCSR, val); + val |= SMBCSR_RELOAD; + jwrite32(jme, JME_SMBCSR, val); + mdelay(12); + + for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { + mdelay(1); + if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) + break; + } + + if (i == 0) { + pr_err("eeprom reload timeout\n"); + return -EIO; + } + } + + return 0; + } + + static void + jme_load_macaddr(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + unsigned char macaddr[6]; + u32 val; + + spin_lock_bh(&jme->macaddr_lock); + val = jread32(jme, JME_RXUMA_LO); + macaddr[0] = (val >> 0) & 0xFF; + macaddr[1] = (val >> 8) & 0xFF; + macaddr[2] = (val >> 16) & 0xFF; + macaddr[3] = (val >> 24) & 0xFF; + val = jread32(jme, JME_RXUMA_HI); + macaddr[4] = (val >> 0) & 0xFF; + macaddr[5] = (val >> 8) & 0xFF; + memcpy(netdev->dev_addr, macaddr, 6); + spin_unlock_bh(&jme->macaddr_lock); + } + + static 
inline void + jme_set_rx_pcc(struct jme_adapter *jme, int p) + { + switch (p) { + case PCC_OFF: + jwrite32(jme, JME_PCCRX0, + ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P1: + jwrite32(jme, JME_PCCRX0, + ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P2: + jwrite32(jme, JME_PCCRX0, + ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P3: + jwrite32(jme, JME_PCCRX0, + ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + default: + break; + } + wmb(); + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); + } + + static void + jme_start_irq(struct jme_adapter *jme) + { + register struct dynpcc_info *dpi = &(jme->dpi); + + jme_set_rx_pcc(jme, PCC_P1); + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + + jwrite32(jme, JME_PCCTX, + ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | + ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | + PCCTXQ0_EN + ); + + /* + * Enable Interrupts + */ + jwrite32(jme, JME_IENS, INTR_ENABLE); + } + + static inline void + jme_stop_irq(struct jme_adapter *jme) + { + /* + * Disable Interrupts + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + } + + static u32 + jme_linkstat_from_phy(struct jme_adapter *jme) + { + u32 phylink, bmsr; + + phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); + bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); + if (bmsr & BMSR_ANCOMP) + phylink |= PHY_LINK_AUTONEG_COMPLETE; + + return phylink; + } + + static inline void + jme_set_phyfifo_5level(struct jme_adapter *jme) + { + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); + } + + static inline void + jme_set_phyfifo_8level(struct jme_adapter *jme) + { + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); + } + + static int + jme_check_link(struct net_device *netdev, int testonly) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; + char linkmsg[64]; + int rc = 0; + + linkmsg[0] = '\0'; + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + + if (phylink & PHY_LINK_UP) { + if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { + /* + * If we did not enable AN + * Speed/Duplex Info should be obtained from SMI + */ + phylink = PHY_LINK_UP; + + bmcr = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + phylink |= ((bmcr & BMCR_SPEED1000) && + (bmcr & BMCR_SPEED100) == 0) ? + PHY_LINK_SPEED_1000M : + (bmcr & BMCR_SPEED100) ? + PHY_LINK_SPEED_100M : + PHY_LINK_SPEED_10M; + + phylink |= (bmcr & BMCR_FULLDPLX) ? 
+ PHY_LINK_DUPLEX : 0; + + strcat(linkmsg, "Forced: "); + } else { + /* + * Keep polling for speed/duplex resolve complete + */ + while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && + --cnt) { + + udelay(1); + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + } + if (!cnt) + pr_err("Waiting speed resolve timeout\n"); + + strcat(linkmsg, "ANed: "); + } + + if (jme->phylink == phylink) { + rc = 1; + goto out; + } + if (testonly) + goto out; + + jme->phylink = phylink; + + /* + * The speed/duplex setting of jme->reg_ghc already cleared + * by jme_reset_mac_processor() + */ + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme->reg_ghc |= GHC_SPEED_10M; + strcat(linkmsg, "10 Mbps, "); + break; + case PHY_LINK_SPEED_100M: + jme->reg_ghc |= GHC_SPEED_100M; + strcat(linkmsg, "100 Mbps, "); + break; + case PHY_LINK_SPEED_1000M: + jme->reg_ghc |= GHC_SPEED_1000M; + strcat(linkmsg, "1000 Mbps, "); + break; + default: + break; + } + + if (phylink & PHY_LINK_DUPLEX) { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); + jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); + jme->reg_ghc |= GHC_DPX; + } else { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | + TXMCS_BACKOFF | + TXMCS_CARRIERSENSE | + TXMCS_COLLISION); + jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); + } + + jwrite32(jme, JME_GHC, jme->reg_ghc); + + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + if (!(phylink & PHY_LINK_DUPLEX)) + jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme_set_phyfifo_8level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_100M: + jme_set_phyfifo_5level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_1000M: + jme_set_phyfifo_8level(jme); + break; + default: + break; + } + } + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + + strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? + "Full-Duplex, " : + "Half-Duplex, "); + strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 
+ "MDI-X" : + "MDI"); + netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg); + netif_carrier_on(netdev); + } else { + if (testonly) + goto out; + + netif_info(jme, link, jme->dev, "Link is down\n"); + jme->phylink = 0; + netif_carrier_off(netdev); + } + + out: + return rc; + } + + static int + jme_setup_tx_resources(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + + txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + &(txring->dmaalloc), + GFP_ATOMIC); + + if (!txring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), + RING_DESC_ALIGN); + txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, jme->tx_ring_size); + + txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->tx_ring_size, GFP_ATOMIC); + if (unlikely(!(txring->bufinf))) + goto err_free_txring; + + /* + * Initialize Transmit Descriptors + */ + memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); + memset(txring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->tx_ring_size); + + return 0; + + err_free_txring: + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + err_set_null: + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + + return -ENOMEM; + } + + static void + jme_free_tx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi; + + if (txring->alloc) { + if (txring->bufinf) { + for (i = 0 ; i < jme->tx_ring_size ; ++i) { + txbi = txring->bufinf + i; + if (txbi->skb) { + dev_kfree_skb(txbi->skb); + txbi->skb = NULL; + } + txbi->mapping = 0; + txbi->len = 0; + txbi->nr_desc = 0; + txbi->start_xmit = 0; + } + kfree(txring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + txring->alloc = NULL; + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + } + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, 0); + } + + static inline void + jme_enable_tx_engine(struct jme_adapter *jme) + { + /* + * Select Queue 0 + */ + jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); + wmb(); + + /* + * Setup TX Queue 0 DMA Bass Address + */ + jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); + jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + + /* + * Setup TX Descptor Count + */ + jwrite32(jme, JME_TXQDC, jme->tx_ring_size); + + /* + * Enable TX Engine + */ + wmb(); + jwrite32f(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + + /* + * Start clock for TX MAC Processor + */ + jme_mac_txclk_on(jme); + } + + static inline void + jme_restart_tx_engine(struct jme_adapter *jme) + { + /* + * Restart TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + } + + static inline void + jme_disable_tx_engine(struct jme_adapter *jme) + { + int i; + u32 val; + + /* + * Disable TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); + wmb(); + + val = jread32(jme, JME_TXCS); + for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; 
--i) { + mdelay(1); + val = jread32(jme, JME_TXCS); + rmb(); + } + + if (!i) + pr_err("Disable TX engine timeout\n"); + + /* + * Stop clock for TX MAC Processor + */ + jme_mac_txclk_off(jme); + } + + static void + jme_set_clean_rxdesc(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + register struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + rxdesc += i; + rxbi += i; + + rxdesc->dw[0] = 0; + rxdesc->dw[1] = 0; + rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); + rxdesc->desc1.bufaddrl = cpu_to_le32( + (__u64)rxbi->mapping & 0xFFFFFFFFUL); + rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); + if (jme->dev->features & NETIF_F_HIGHDMA) + rxdesc->desc1.flags = RXFLAG_64BIT; + wmb(); + rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; + } + + static int + jme_make_new_rx_buf(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf + i; + struct sk_buff *skb; + dma_addr_t mapping; + + skb = netdev_alloc_skb(jme->dev, + jme->dev->mtu + RX_EXTRA_LEN); + if (unlikely(!skb)) + return -ENOMEM; + + mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + if (likely(rxbi->mapping)) + pci_unmap_page(jme->pdev, rxbi->mapping, + rxbi->len, PCI_DMA_FROMDEVICE); + + rxbi->skb = skb; + rxbi->len = skb_tailroom(skb); + rxbi->mapping = mapping; + return 0; + } + + static void + jme_free_rx_buf(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf; + rxbi += i; + + if (rxbi->skb) { + pci_unmap_page(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxbi->skb); + rxbi->skb = NULL; + rxbi->mapping = 0; + rxbi->len = 0; + } + } + + static void + jme_free_rx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + if (rxring->alloc) { + if (rxring->bufinf) { + for (i = 0 ; i < jme->rx_ring_size ; ++i) + jme_free_rx_buf(jme, i); + kfree(rxring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); + rxring->alloc = NULL; + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + } + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); + } + + static int + jme_setup_rx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + &(rxring->dmaalloc), + GFP_ATOMIC); + if (!rxring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), + RING_DESC_ALIGN); + rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); + + rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->rx_ring_size, GFP_ATOMIC); + if (unlikely(!(rxring->bufinf))) + goto err_free_rxring; + + /* + * Initiallize Receive Descriptors + */ + memset(rxring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->rx_ring_size); + for (i = 0 ; i < jme->rx_ring_size ; ++i) { + if (unlikely(jme_make_new_rx_buf(jme, i))) { + jme_free_rx_resources(jme); + return -ENOMEM; + } + + 
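+ /* publish the freshly mapped buffer to the NIC: jme_set_clean_rxdesc() writes its DMA address and length, then sets the OWN and INT flags behind a write barrier */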
jme_set_clean_rxdesc(jme, i); + } + + return 0; + + err_free_rxring: + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); + err_set_null: + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + + return -ENOMEM; + } + + static inline void + jme_enable_rx_engine(struct jme_adapter *jme) + { + /* + * Select Queue 0 + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0); + wmb(); + + /* + * Setup RX DMA Bass Address + */ + jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); + jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + + /* + * Setup RX Descriptor Count + */ + jwrite32(jme, JME_RXQDC, jme->rx_ring_size); + + /* + * Setup Unicast Filter + */ + jme_set_unicastaddr(jme->dev); + jme_set_multi(jme->dev); + + /* + * Enable RX Engine + */ + wmb(); + jwrite32f(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); + + /* + * Start clock for RX MAC Processor + */ + jme_mac_rxclk_on(jme); + } + + static inline void + jme_restart_rx_engine(struct jme_adapter *jme) + { + /* + * Start RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); + } + + static inline void + jme_disable_rx_engine(struct jme_adapter *jme) + { + int i; + u32 val; + + /* + * Disable RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs); + wmb(); + + val = jread32(jme, JME_RXCS); + for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_RXCS); + rmb(); + } + + if (!i) + pr_err("Disable RX engine timeout\n"); + + /* + * Stop clock for RX MAC Processor + */ + jme_mac_rxclk_off(jme); + } + + static u16 + jme_udpsum(struct sk_buff *skb) + { + u16 csum = 0xFFFFu; + + if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) + return csum; + if (skb->protocol != htons(ETH_P_IP)) + return csum; + skb_set_network_header(skb, ETH_HLEN); + if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || + (skb->len < (ETH_HLEN + + (ip_hdr(skb)->ihl << 2) + + sizeof(struct udphdr)))) { + skb_reset_network_header(skb); + return csum; + } + skb_set_transport_header(skb, + ETH_HLEN + (ip_hdr(skb)->ihl << 2)); + csum = udp_hdr(skb)->check; + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + return csum; + } + + static int + jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) + { + if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) + return false; + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) + == RXWBFLAG_TCPON)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) + == RXWBFLAG_UDPON) && jme_udpsum(skb)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) + == RXWBFLAG_IPV4)) { + netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n"); + return false; + } + + return true; + } + + static void + jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + struct sk_buff *skb; + int framesize; + + rxdesc += idx; + rxbi += idx; + + skb = rxbi->skb; + 
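+ /*
+ * Refill-first strategy: map a replacement buffer before handing the
+ * received skb up the stack; if that allocation fails, the old buffer
+ * is synced back to the device and the frame is counted as dropped.
+ */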
pci_dma_sync_single_for_cpu(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + if (unlikely(jme_make_new_rx_buf(jme, idx))) { + pci_dma_sync_single_for_device(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + ++(NET_STAT(jme).rx_dropped); + } else { + framesize = le16_to_cpu(rxdesc->descwb.framesize) + - RX_PREPAD_SIZE; + + skb_reserve(skb, RX_PREPAD_SIZE); + skb_put(skb, framesize); + skb->protocol = eth_type_trans(skb, jme->dev); + + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { + u16 vid = le16_to_cpu(rxdesc->descwb.vlan); + + __vlan_hwaccel_put_tag(skb, vid); + NET_STAT(jme).rx_bytes += 4; + } + jme->jme_rx(skb); + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == + cpu_to_le16(RXWBFLAG_DEST_MUL)) + ++(NET_STAT(jme).multicast); + + NET_STAT(jme).rx_bytes += framesize; + ++(NET_STAT(jme).rx_packets); + } + + jme_set_clean_rxdesc(jme, idx); + + } + + static int + jme_process_receive(struct jme_adapter *jme, int limit) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; + + if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) + goto out_inc; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out_inc; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out_inc; + + i = atomic_read(&rxring->next_to_clean); + while (limit > 0) { + rxdesc = rxring->desc; + rxdesc += i; + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || + !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) + goto out; + --limit; + + rmb(); + desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; + + if (unlikely(desccnt > 1 || + rxdesc->descwb.errstat & RXWBERR_ALLERR)) { + + if (rxdesc->descwb.errstat & RXWBERR_CRCERR) + ++(NET_STAT(jme).rx_crc_errors); + else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) + ++(NET_STAT(jme).rx_fifo_errors); + else + ++(NET_STAT(jme).rx_errors); + + if (desccnt > 1) + limit -= desccnt - 1; + + for (j = i, ccnt = desccnt ; ccnt-- ; ) { + jme_set_clean_rxdesc(jme, j); + j = (j + 1) & (mask); + } + + } else { + jme_alloc_and_feed_skb(jme, i); + } + + i = (i + desccnt) & (mask); + } + + out: + atomic_set(&rxring->next_to_clean, i); + + out_inc: + atomic_inc(&jme->rx_cleaning); + + return limit > 0 ? 
limit : 0; + + } + + static void + jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) + { + if (likely(atmp == dpi->cur)) { + dpi->cnt = 0; + return; + } + + if (dpi->attempt == atmp) { + ++(dpi->cnt); + } else { + dpi->attempt = atmp; + dpi->cnt = 0; + } + + } + + static void + jme_dynamic_pcc(struct jme_adapter *jme) + { + register struct dynpcc_info *dpi = &(jme->dpi); + + if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P3); + else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || + dpi->intr_cnt > PCC_INTR_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P2); + else + jme_attempt_pcc(dpi, PCC_P1); + + if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { + if (dpi->attempt < dpi->cur) + tasklet_schedule(&jme->rxclean_task); + jme_set_rx_pcc(jme, dpi->attempt); + dpi->cur = dpi->attempt; + dpi->cnt = 0; + } + } + + static void + jme_start_pcc_timer(struct jme_adapter *jme) + { + struct dynpcc_info *dpi = &(jme->dpi); + dpi->last_bytes = NET_STAT(jme).rx_bytes; + dpi->last_pkts = NET_STAT(jme).rx_packets; + dpi->intr_cnt = 0; + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); + } + + static inline void + jme_stop_pcc_timer(struct jme_adapter *jme) + { + jwrite32(jme, JME_TMCSR, 0); + } + + static void + jme_shutdown_nic(struct jme_adapter *jme) + { + u32 phylink; + + phylink = jme_linkstat_from_phy(jme); + + if (!(phylink & PHY_LINK_UP)) { + /* + * Disable all interrupt before issue timer + */ + jme_stop_irq(jme); + jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); + } + } + + static void + jme_pcc_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + + if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { + jme_shutdown_nic(jme); + return; + } + + if (unlikely(!netif_carrier_ok(netdev) || + (atomic_read(&jme->link_changing) != 1) + )) { + jme_stop_pcc_timer(jme); + return; + } + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + jme_dynamic_pcc(jme); + + jme_start_pcc_timer(jme); + } + + static inline void + jme_polling_mode(struct jme_adapter *jme) + { + jme_set_rx_pcc(jme, PCC_OFF); + } + + static inline void + jme_interrupt_mode(struct jme_adapter *jme) + { + jme_set_rx_pcc(jme, PCC_P1); + } + + static inline int + jme_pseudo_hotplug_enabled(struct jme_adapter *jme) + { + u32 apmc; + apmc = jread32(jme, JME_APMC); + return apmc & JME_APMC_PSEUDO_HP_EN; + } + + static void + jme_start_shutdown_timer(struct jme_adapter *jme) + { + u32 apmc; + + apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; + apmc &= ~JME_APMC_EPIEN_CTRL; + if (!no_extplug) { + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); + wmb(); + } + jwrite32f(jme, JME_APMC, apmc); + + jwrite32f(jme, JME_TIMER2, 0); + set_bit(JME_FLAG_SHUTDOWN, &jme->flags); + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); + } + + static void + jme_stop_shutdown_timer(struct jme_adapter *jme) + { + u32 apmc; + + jwrite32f(jme, JME_TMCSR, 0); + jwrite32f(jme, JME_TIMER2, 0); + clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); + + apmc = jread32(jme, JME_APMC); + apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); + wmb(); + jwrite32f(jme, JME_APMC, apmc); + } + + static void + jme_link_change_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + int rc; + + while 
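+ /* link_changing doubles as a lock (1 == free): spin until we take it by decrementing it to zero; it is released by the atomic_inc() at "out:" */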
(!atomic_dec_and_test(&jme->link_changing)) { + atomic_inc(&jme->link_changing); + netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); + while (atomic_read(&jme->link_changing) != 1) + netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); + } + + if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) + goto out; + + jme->old_mtu = netdev->mtu; + netif_stop_queue(netdev); + if (jme_pseudo_hotplug_enabled(jme)) + jme_stop_shutdown_timer(jme); + + jme_stop_pcc_timer(jme); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + netif_carrier_off(netdev); + } + + jme_check_link(netdev, 0); + if (netif_carrier_ok(netdev)) { + rc = jme_setup_rx_resources(jme); + if (rc) { + pr_err("Allocating resources for RX error, Device STOPPED!\n"); + goto out_enable_tasklet; + } + + rc = jme_setup_tx_resources(jme); + if (rc) { + pr_err("Allocating resources for TX error, Device STOPPED!\n"); + goto err_out_free_rx_resources; + } + + jme_enable_rx_engine(jme); + jme_enable_tx_engine(jme); + + netif_start_queue(netdev); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_interrupt_mode(jme); + + jme_start_pcc_timer(jme); + } else if (jme_pseudo_hotplug_enabled(jme)) { + jme_start_shutdown_timer(jme); + } + + goto out_enable_tasklet; + + err_out_free_rx_resources: + jme_free_rx_resources(jme); + out_enable_tasklet: + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + out: + atomic_inc(&jme->link_changing); + } + + static void + jme_rx_clean_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct dynpcc_info *dpi = &(jme->dpi); + + jme_process_receive(jme, jme->rx_ring_size); + ++(dpi->intr_cnt); + + } + + static int + jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) + { + struct jme_adapter *jme = jme_napi_priv(holder); + int rest; + + rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + + if (rest) { + JME_RX_COMPLETE(netdev, holder); + jme_interrupt_mode(jme); + } + + JME_NAPI_WEIGHT_SET(budget, rest); + return JME_NAPI_WEIGHT_VAL(budget) - rest; + } + + static void + jme_rx_empty_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + return; + + if (unlikely(!netif_carrier_ok(jme->dev))) + return; + + netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); + + jme_rx_clean_tasklet(arg); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + } + + static void + jme_wake_queue_if_stopped(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + + smp_wmb(); + if (unlikely(netif_queue_stopped(jme->dev) && + atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { + netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); + netif_wake_queue(jme->dev); + } + + } + + static void + jme_tx_clean_tasklet(unsigned long arg) + { + struct 
jme_adapter *jme = (struct jme_adapter *)arg; + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = txring->desc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; + int i, j, cnt = 0, max, err, mask; + + tx_dbg(jme, "Into txclean\n"); + + if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) + goto out; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out; + + max = jme->tx_ring_size - atomic_read(&txring->nr_free); + mask = jme->tx_ring_mask; + + for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { + + ctxbi = txbi + i; + + if (likely(ctxbi->skb && + !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { + + tx_dbg(jme, "txclean: %d+%d@%lu\n", + i, ctxbi->nr_desc, jiffies); + + err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; + + for (j = 1 ; j < ctxbi->nr_desc ; ++j) { + ttxbi = txbi + ((i + j) & (mask)); + txdesc[(i + j) & (mask)].dw[0] = 0; + + pci_unmap_page(jme->pdev, + ttxbi->mapping, + ttxbi->len, + PCI_DMA_TODEVICE); + + ttxbi->mapping = 0; + ttxbi->len = 0; + } + + dev_kfree_skb(ctxbi->skb); + + cnt += ctxbi->nr_desc; + + if (unlikely(err)) { + ++(NET_STAT(jme).tx_carrier_errors); + } else { + ++(NET_STAT(jme).tx_packets); + NET_STAT(jme).tx_bytes += ctxbi->len; + } + + ctxbi->skb = NULL; + ctxbi->len = 0; + ctxbi->start_xmit = 0; + + } else { + break; + } + + i = (i + ctxbi->nr_desc) & mask; + + ctxbi->nr_desc = 0; + } + + tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); + atomic_set(&txring->next_to_clean, i); + atomic_add(cnt, &txring->nr_free); + + jme_wake_queue_if_stopped(jme); + + out: + atomic_inc(&jme->tx_cleaning); + } + + static void + jme_intr_msi(struct jme_adapter *jme, u32 intrstat) + { + /* + * Disable interrupt + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + + if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { + /* + * Link change event is critical + * all other events are ignored + */ + jwrite32(jme, JME_IEVE, intrstat); + tasklet_schedule(&jme->linkch_task); + goto out_reenable; + } + + if (intrstat & INTR_TMINTR) { + jwrite32(jme, JME_IEVE, INTR_TMINTR); + tasklet_schedule(&jme->pcc_task); + } + + if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { + jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); + tasklet_schedule(&jme->txclean_task); + } + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | + INTR_PCCRX0 | + INTR_RX0EMP)) | + INTR_RX0); + } + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + if (intrstat & INTR_RX0EMP) + atomic_inc(&jme->rx_empty); + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + if (likely(JME_RX_SCHEDULE_PREP(jme))) { + jme_polling_mode(jme); + JME_RX_SCHEDULE(jme); + } + } + } else { + if (intrstat & INTR_RX0EMP) { + atomic_inc(&jme->rx_empty); + tasklet_hi_schedule(&jme->rxempty_task); + } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { + tasklet_hi_schedule(&jme->rxclean_task); + } + } + + out_reenable: + /* + * Re-enable interrupt + */ + jwrite32f(jme, JME_IENS, INTR_ENABLE); + } + + static irqreturn_t + jme_intr(int irq, void *dev_id) + { + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + /* + * Check if it's really an interrupt for us + */ + if (unlikely((intrstat & INTR_ENABLE) == 0)) + return IRQ_NONE; + + /* + * Check if the device still exist + */ + if (unlikely(intrstat == ~((typeof(intrstat))0))) + return IRQ_NONE; + + 
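+ /* a genuine, enabled interrupt source is pending: service it through the handler shared with the MSI path */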
jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; + } + + static irqreturn_t + jme_msi(int irq, void *dev_id) + { + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; + } + + static void + jme_reset_link(struct jme_adapter *jme) + { + jwrite32(jme, JME_TMCSR, TMCSR_SWIT); + } + + static void + jme_restart_an(struct jme_adapter *jme) + { + u32 bmcr; + + spin_lock_bh(&jme->phy_lock); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + spin_unlock_bh(&jme->phy_lock); + } + + static int + jme_request_irq(struct jme_adapter *jme) + { + int rc; + struct net_device *netdev = jme->dev; + irq_handler_t handler = jme_intr; + int irq_flags = IRQF_SHARED; + + if (!pci_enable_msi(jme->pdev)) { + set_bit(JME_FLAG_MSI, &jme->flags); + handler = jme_msi; + irq_flags = 0; + } + + rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (rc) { + netdev_err(netdev, + "Unable to request %s interrupt (return: %d)\n", + test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", + rc); + + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + } + } else { + netdev->irq = jme->pdev->irq; + } + + return rc; + } + + static void + jme_free_irq(struct jme_adapter *jme) + { + free_irq(jme->pdev->irq, jme->dev); + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + jme->dev->irq = jme->pdev->irq; + } + } + + static inline void + jme_new_phy_on(struct jme_adapter *jme) + { + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL); + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_ENBG; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); + } + + static inline void + jme_new_phy_off(struct jme_adapter *jme) + { + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL; + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_PDD3COLD; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); + } + + static inline void + jme_phy_on(struct jme_adapter *jme) + { + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr &= ~BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_on(jme); + } + + static inline void + jme_phy_off(struct jme_adapter *jme) + { + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_off(jme); + } + + static int + jme_open(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + jme_clear_pm(jme); + JME_NAPI_ENABLE(jme); + + tasklet_enable(&jme->linkch_task); + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + rc = jme_request_irq(jme); + if (rc) + goto err_out; + + jme_start_irq(jme); + + jme_phy_on(jme); + if 
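+ /* reapply the user's saved ethtool settings if any were made, otherwise bring the PHY back to autonegotiation defaults */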
(test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_reset_link(jme); + + return 0; + + err_out: + netif_stop_queue(netdev); + netif_carrier_off(netdev); + return rc; + } + + static void + jme_set_100m_half(struct jme_adapter *jme) + { + u32 bmcr, tmp; + + jme_phy_on(jme); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + tmp |= BMCR_SPEED100; + + if (bmcr != tmp) + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); + + if (jme->fpgaver) + jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); + else + jwrite32(jme, JME_GHC, GHC_SPEED_100M); + } + + #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ + static void + jme_wait_link(struct jme_adapter *jme) + { + u32 phylink, to = JME_WAIT_LINK_TIME; + + mdelay(1000); + phylink = jme_linkstat_from_phy(jme); + while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { + mdelay(10); + phylink = jme_linkstat_from_phy(jme); + } + } + + static void + jme_powersave_phy(struct jme_adapter *jme) + { + if (jme->reg_pmcs) { + jme_set_100m_half(jme); + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + jme_wait_link(jme); + jme_clear_pm(jme); + } else { + jme_phy_off(jme); + } + } + + static int + jme_close(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + jme_stop_irq(jme); + jme_free_irq(jme); + + JME_NAPI_DISABLE(jme); + + tasklet_disable(&jme->linkch_task); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + jme->phylink = 0; + jme_phy_off(jme); + + return 0; + } + + static int + jme_alloc_txdesc(struct jme_adapter *jme, + struct sk_buff *skb) + { + struct jme_ring *txring = &(jme->txring[0]); + int idx, nr_alloc, mask = jme->tx_ring_mask; + + idx = txring->next_to_use; + nr_alloc = skb_shinfo(skb)->nr_frags + 2; + + if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) + return -1; + + atomic_sub(nr_alloc, &txring->nr_free); + + txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; + + return idx; + } + + static void + jme_fill_tx_map(struct pci_dev *pdev, + struct txdesc *txdesc, + struct jme_buffer_info *txbi, + struct page *page, + u32 page_offset, + u32 len, + u8 hidma) + { + dma_addr_t dmaaddr; + + dmaaddr = pci_map_page(pdev, + page, + page_offset, + len, + PCI_DMA_TODEVICE); + + pci_dma_sync_single_for_device(pdev, + dmaaddr, + len, + PCI_DMA_TODEVICE); + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->desc2.flags = TXFLAG_OWN; + txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0; + txdesc->desc2.datalen = cpu_to_le16(len); + txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); + txdesc->desc2.bufaddrl = cpu_to_le32( + (__u64)dmaaddr & 0xFFFFFFFFUL); + + txbi->mapping = dmaaddr; + txbi->len = len; + } + + static void + jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) + { + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = txring->desc, *ctxdesc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + int mask = jme->tx_ring_mask; + const struct skb_frag_struct *frag; + u32 len; + + for (i = 0 ; i < nr_frags ; ++i) { + frag = &skb_shinfo(skb)->frags[i]; + ctxdesc = txdesc + ((idx + i + 2) & (mask)); + ctxbi = txbi + ((idx + i + 2) & (mask)); + + jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + skb_frag_page(frag), + frag->page_offset, skb_frag_size(frag), hidma); + } + + len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; + ctxdesc = txdesc + ((idx + 1) & (mask)); + ctxbi = txbi + ((idx + 1) & (mask)); + jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + offset_in_page(skb->data), len, hidma); + + } + + static int + jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) + { + if (unlikely(skb_shinfo(skb)->gso_size && + skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { + dev_kfree_skb(skb); + return -1; + } + + return 0; + } + + static int + jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) + { + *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); + if (*mss) { + *flags |= TXFLAG_LSEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, 0, + IPPROTO_TCP, + 0); + } + + return 0; + } + + return 1; + } + + static void + jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) + { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_proto; + + switch (skb->protocol) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + ip_proto = 0; + break; + } + + switch (ip_proto) { + case IPPROTO_TCP: + *flags |= TXFLAG_TCPCS; + break; + case IPPROTO_UDP: + *flags |= TXFLAG_UDPCS; + break; + default: + netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n"); + break; + } + } + } + + static inline void + jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) + { + if (vlan_tx_tag_present(skb)) { + *flags |= TXFLAG_TAGON; + *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + } + } + + static int + jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) + { + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc; + struct jme_buffer_info *txbi; + u8 flags; + + txdesc = (struct txdesc *)txring->desc + idx; + txbi = txring->bufinf + idx; + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->dw[2] = 0; + txdesc->dw[3] = 0; + txdesc->desc1.pktsize = cpu_to_le16(skb->len); + /* + * Set OWN bit at final. + * When kernel transmit faster than NIC. + * And NIC trying to send this descriptor before we tell + * it to start sending this TX queue. + * Other fields are already filled correctly. 
+ */ + wmb(); + flags = TXFLAG_OWN | TXFLAG_INT; + /* + * Set checksum flags while not tso + */ + if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) + jme_tx_csum(jme, skb, &flags); + jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); + jme_map_tx_skb(jme, skb, idx); + txdesc->desc1.flags = flags; + /* + * Set tx buffer info after telling NIC to send + * For better tx_clean timing + */ + wmb(); + txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; + txbi->skb = skb; + txbi->len = skb->len; + txbi->start_xmit = jiffies; + if (!txbi->start_xmit) + txbi->start_xmit = (0UL-1); + + return 0; + } + + static void + jme_stop_queue_if_full(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf; + int idx = atomic_read(&txring->next_to_clean); + + txbi += idx; + + smp_wmb(); + if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n"); + smp_wmb(); + if (atomic_read(&txring->nr_free) + >= (jme->tx_wake_threshold)) { + netif_wake_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n"); + } + } + + if (unlikely(txbi->start_xmit && + (jiffies - txbi->start_xmit) >= TX_TIMEOUT && + txbi->skb)) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, + "TX Queue Stopped %d@%lu\n", idx, jiffies); + } + } + + /* + * This function is already protected by netif_tx_lock() + */ + + static netdev_tx_t + jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + int idx; + + if (unlikely(jme_expand_header(jme, skb))) { + ++(NET_STAT(jme).tx_dropped); + return NETDEV_TX_OK; + } + + idx = jme_alloc_txdesc(jme, skb); + + if (unlikely(idx < 0)) { + netif_stop_queue(netdev); + netif_err(jme, tx_err, jme->dev, + "BUG! 
Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + + jme_fill_tx_desc(jme, skb, idx); + + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_QUEUE0S | + TXCS_ENABLE); + + tx_dbg(jme, "xmit: %d+%d@%lu\n", + idx, skb_shinfo(skb)->nr_frags + 2, jiffies); + jme_stop_queue_if_full(jme); + + return NETDEV_TX_OK; + } + + static void + jme_set_unicastaddr(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + val = (netdev->dev_addr[3] & 0xff) << 24 | + (netdev->dev_addr[2] & 0xff) << 16 | + (netdev->dev_addr[1] & 0xff) << 8 | + (netdev->dev_addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (netdev->dev_addr[5] & 0xff) << 8 | + (netdev->dev_addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); + } + + static int + jme_set_macaddr(struct net_device *netdev, void *p) + { + struct jme_adapter *jme = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + spin_lock_bh(&jme->macaddr_lock); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + jme_set_unicastaddr(netdev); + spin_unlock_bh(&jme->macaddr_lock); + + return 0; + } + + static void + jme_set_multi(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 mc_hash[2] = {}; + + spin_lock_bh(&jme->rxmcs_lock); + + jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; + + if (netdev->flags & IFF_PROMISC) { + jme->reg_rxmcs |= RXMCS_ALLFRAME; + } else if (netdev->flags & IFF_ALLMULTI) { + jme->reg_rxmcs |= RXMCS_ALLMULFRAME; + } else if (netdev->flags & IFF_MULTICAST) { + struct netdev_hw_addr *ha; + int bit_nr; + + jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; + mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); + } + + jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); + jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); + } + + wmb(); + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + + spin_unlock_bh(&jme->rxmcs_lock); + } + + static int + jme_change_mtu(struct net_device *netdev, int new_mtu) + { + struct jme_adapter *jme = netdev_priv(netdev); + + if (new_mtu == jme->old_mtu) + return 0; + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu) < IPV6_MIN_MTU)) + return -EINVAL; + + if (new_mtu > 4000) { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; + jme_restart_rx_engine(jme); + } else { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; + jme_restart_rx_engine(jme); + } + + netdev->mtu = new_mtu; + netdev_update_features(netdev); + + jme_reset_link(jme); + + return 0; + } + + static void + jme_tx_timeout(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + + jme->phylink = 0; + jme_reset_phy_processor(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + + /* + * Force to Reset the link again + */ + jme_reset_link(jme); + } + + static inline void jme_pause_rx(struct jme_adapter *jme) + { + atomic_dec(&jme->link_changing); + + jme_set_rx_pcc(jme, PCC_OFF); + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_DISABLE(jme); + } else { + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + } + } + + static inline void jme_resume_rx(struct jme_adapter *jme) + { + struct dynpcc_info *dpi = &(jme->dpi); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_ENABLE(jme); + } else { + tasklet_hi_enable(&jme->rxclean_task); + 
tasklet_hi_enable(&jme->rxempty_task); + } + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + + atomic_inc(&jme->link_changing); + } + + static void + jme_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) + { + struct jme_adapter *jme = netdev_priv(netdev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(jme->pdev)); + } + + static int + jme_get_regs_len(struct net_device *netdev) + { + return JME_REG_LEN; + } + + static void + mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) + { + int i; + + for (i = 0 ; i < len ; i += 4) + p[i >> 2] = jread32(jme, reg + i); + } + + static void + mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) + { + int i; + u16 *p16 = (u16 *)p; + + for (i = 0 ; i < reg_nr ; ++i) + p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); + } + + static void + jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 *p32 = (u32 *)p; + + memset(p, 0xFF, JME_REG_LEN); + + regs->version = 1; + mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); + + p32 += 0x100 >> 2; + mdio_memcpy(jme, p32, JME_PHY_REG_NR); + } + + static int + jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = PCC_TX_TO; + ecmd->tx_max_coalesced_frames = PCC_TX_CNT; + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + ecmd->use_adaptive_rx_coalesce = false; + ecmd->rx_coalesce_usecs = 0; + ecmd->rx_max_coalesced_frames = 0; + return 0; + } + + ecmd->use_adaptive_rx_coalesce = true; + + switch (jme->dpi.cur) { + case PCC_P1: + ecmd->rx_coalesce_usecs = PCC_P1_TO; + ecmd->rx_max_coalesced_frames = PCC_P1_CNT; + break; + case PCC_P2: + ecmd->rx_coalesce_usecs = PCC_P2_TO; + ecmd->rx_max_coalesced_frames = PCC_P2_CNT; + break; + case PCC_P3: + ecmd->rx_coalesce_usecs = PCC_P3_TO; + ecmd->rx_max_coalesced_frames = PCC_P3_CNT; + break; + default: + break; + } + + return 0; + } + + static int + jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + struct dynpcc_info *dpi = &(jme->dpi); + + if (netif_running(netdev)) + return -EBUSY; + + if (ecmd->use_adaptive_rx_coalesce && + test_bit(JME_FLAG_POLL, &jme->flags)) { + clear_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_rx; + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + jme_interrupt_mode(jme); + } else if (!(ecmd->use_adaptive_rx_coalesce) && + !(test_bit(JME_FLAG_POLL, &jme->flags))) { + set_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_receive_skb; + jme_interrupt_mode(jme); + } + + return 0; + } + + static void + jme_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; + ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + spin_unlock_bh(&jme->phy_lock); + + ecmd->autoneg = + (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; + } + 
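[Editorial aside on the pause handling just above: jme_get_pauseparam derives its three ethtool fields from three sources — the TXPFC shadow register for tx pause, the RXMCS shadow register for rx pause, and one live MII read of the advertisement word for autoneg. The following stand-alone C sketch models that derivation; the TXPFC_PF_EN and RXMCS_FLOWCTRL bit values here are illustrative stand-ins, not the driver's real register layout, while the two MII advertisement bits (0x0400, 0x0800) are the standard ones from mii.h.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TXPFC_PF_EN          0x00000001u  /* stand-in for the driver's bit */
#define RXMCS_FLOWCTRL       0x00000008u  /* stand-in for the driver's bit */
#define ADVERTISE_PAUSE_CAP  0x0400u      /* standard MII advertise bit 10 */
#define ADVERTISE_PAUSE_ASYM 0x0800u      /* standard MII advertise bit 11 */

struct pauseparam {
	bool autoneg;
	bool rx_pause;
	bool tx_pause;
};

/* Mirror of the derivation in jme_get_pauseparam: the two shadow
 * registers give the tx/rx pause state, the PHY advertisement word
 * gives the autoneg state. */
static struct pauseparam derive_pauseparam(uint32_t reg_txpfc,
					   uint32_t reg_rxmcs,
					   uint16_t mii_advertise)
{
	struct pauseparam p;

	p.tx_pause = (reg_txpfc & TXPFC_PF_EN) != 0;
	p.rx_pause = (reg_rxmcs & RXMCS_FLOWCTRL) != 0;
	p.autoneg = (mii_advertise &
		     (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
	return p;
}

int main(void)
{
	/* tx pause enabled, rx pause off, pause advertised to the peer */
	struct pauseparam p =
		derive_pauseparam(TXPFC_PF_EN, 0, ADVERTISE_PAUSE_CAP);

	printf("autoneg=%d rx_pause=%d tx_pause=%d\n",
	       p.autoneg, p.rx_pause, p.tx_pause);
	return 0;
}

[jme_set_pauseparam below is the inverse: it XORs the current bit against the requested boolean and only touches the register or the PHY when the two actually differ, which avoids a needless autonegotiation restart.]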
+ static int + jme_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ + (ecmd->tx_pause != 0)) { + + if (ecmd->tx_pause) + jme->reg_txpfc |= TXPFC_PF_EN; + else + jme->reg_txpfc &= ~TXPFC_PF_EN; + + jwrite32(jme, JME_TXPFC, jme->reg_txpfc); + } + + spin_lock_bh(&jme->rxmcs_lock); + if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ + (ecmd->rx_pause != 0)) { + + if (ecmd->rx_pause) + jme->reg_rxmcs |= RXMCS_FLOWCTRL; + else + jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; + + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + } + spin_unlock_bh(&jme->rxmcs_lock); + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ + (ecmd->autoneg != 0)) { + + if (ecmd->autoneg) + val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + else + val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + jme_mdio_write(jme->dev, jme->mii_if.phy_id, + MII_ADVERTISE, val); + } + spin_unlock_bh(&jme->phy_lock); + + return 0; + } + + static void + jme_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + struct jme_adapter *jme = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + + wol->wolopts = 0; + + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + wol->wolopts |= WAKE_PHY; + + if (jme->reg_pmcs & PMCS_MFEN) + wol->wolopts |= WAKE_MAGIC; + + } + + static int + jme_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + struct jme_adapter *jme = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_MAGICSECURE | + WAKE_UCAST | + WAKE_MCAST | + WAKE_BCAST | + WAKE_ARP)) + return -EOPNOTSUPP; + + jme->reg_pmcs = 0; + + if (wol->wolopts & WAKE_PHY) + jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; + + if (wol->wolopts & WAKE_MAGIC) + jme->reg_pmcs |= PMCS_MFEN; + + jwrite32(jme, JME_PMCS, jme->reg_pmcs); + device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); + + return 0; + } + + static int + jme_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + return rc; + } + + static int + jme_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc, fdc = 0; + + if (ethtool_cmd_speed(ecmd) == SPEED_1000 + && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* + * Check If user changed duplex only while force_media. + * Hardware would not generate link change interrupt. 
+ */ + if (jme->mii_if.force_media && + ecmd->autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != ecmd->duplex)) + fdc = 1; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + + if (!rc) { + if (fdc) + jme_reset_link(jme); + jme->old_ecmd = *ecmd; + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; + } + + static int + jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) + { + int rc; + struct jme_adapter *jme = netdev_priv(netdev); + struct mii_ioctl_data *mii_data = if_mii(rq); + unsigned int duplex_chg; + + if (cmd == SIOCSMIIREG) { + u16 val = mii_data->val_in; + if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && + (val & BMCR_SPEED1000)) + return -EINVAL; + } + + spin_lock_bh(&jme->phy_lock); + rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); + spin_unlock_bh(&jme->phy_lock); + + if (!rc && (cmd == SIOCSMIIREG)) { + if (duplex_chg) + jme_reset_link(jme); + jme_get_settings(netdev, &jme->old_ecmd); + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; + } + + static u32 + jme_get_link(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; + } + + static u32 + jme_get_msglevel(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + return jme->msg_enable; + } + + static void + jme_set_msglevel(struct net_device *netdev, u32 value) + { + struct jme_adapter *jme = netdev_priv(netdev); + jme->msg_enable = value; + } + + static u32 + jme_fix_features(struct net_device *netdev, u32 features) + { + if (netdev->mtu > 1900) + features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + return features; + } + + static int + jme_set_features(struct net_device *netdev, u32 features) + { + struct jme_adapter *jme = netdev_priv(netdev); + + spin_lock_bh(&jme->rxmcs_lock); + if (features & NETIF_F_RXCSUM) + jme->reg_rxmcs |= RXMCS_CHECKSUM; + else + jme->reg_rxmcs &= ~RXMCS_CHECKSUM; + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + spin_unlock_bh(&jme->rxmcs_lock); + + return 0; + } + + static int + jme_nway_reset(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + jme_restart_an(jme); + return 0; + } + + static u8 + jme_smb_read(struct jme_adapter *jme, unsigned int addr) + { + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + jwrite32(jme, JME_SMBINTF, + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_READ | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; + } + + static void + jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) + { + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + jwrite32(jme, JME_SMBINTF, + ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_WRITE | + SMBINTF_HWCMD); + + val = 
jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + mdelay(2); + } + + static int + jme_get_eeprom_len(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + val = jread32(jme, JME_SMBCSR); + return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; + } + + static int + jme_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + /* + * ethtool will check the boundary for us + */ + eeprom->magic = JME_EEPROM_MAGIC; + for (i = 0 ; i < len ; ++i) + data[i] = jme_smb_read(jme, i + offset); + + return 0; + } + + static int + jme_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + if (eeprom->magic != JME_EEPROM_MAGIC) + return -EINVAL; + + /* + * ethtool will check the boundary for us + */ + for (i = 0 ; i < len ; ++i) + jme_smb_write(jme, i + offset, data[i]); + + return 0; + } + + static const struct ethtool_ops jme_ethtool_ops = { + .get_drvinfo = jme_get_drvinfo, + .get_regs_len = jme_get_regs_len, + .get_regs = jme_get_regs, + .get_coalesce = jme_get_coalesce, + .set_coalesce = jme_set_coalesce, + .get_pauseparam = jme_get_pauseparam, + .set_pauseparam = jme_set_pauseparam, + .get_wol = jme_get_wol, + .set_wol = jme_set_wol, + .get_settings = jme_get_settings, + .set_settings = jme_set_settings, + .get_link = jme_get_link, + .get_msglevel = jme_get_msglevel, + .set_msglevel = jme_set_msglevel, + .nway_reset = jme_nway_reset, + .get_eeprom_len = jme_get_eeprom_len, + .get_eeprom = jme_get_eeprom, + .set_eeprom = jme_set_eeprom, + }; + + static int + jme_pci_dma64(struct pci_dev *pdev) + { + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + return 1; + + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) + return 1; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return 0; + + return -1; + } + + static inline void + jme_phy_init(struct jme_adapter *jme) + { + u16 reg26; + + reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); + } + + static inline void + jme_check_hw_ver(struct jme_adapter *jme) + { + u32 chipmode; + + chipmode = jread32(jme, JME_CHIPMODE); + + jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; + jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; + jme->chip_main_rev = jme->chiprev & 0xF; + jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; + } + + static const struct net_device_ops jme_netdev_ops = { + .ndo_open = jme_open, + .ndo_stop = jme_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = jme_ioctl, + .ndo_start_xmit = jme_start_xmit, + .ndo_set_mac_address = jme_set_macaddr, + .ndo_set_rx_mode = jme_set_multi, + .ndo_change_mtu = jme_change_mtu, + .ndo_tx_timeout = jme_tx_timeout, + .ndo_fix_features = jme_fix_features, + .ndo_set_features = jme_set_features, + }; + + static int __devinit + jme_init_one(struct 
pci_dev *pdev, + const struct pci_device_id *ent) + { + int rc = 0, using_dac, i; + struct net_device *netdev; + struct jme_adapter *jme; + u16 bmcr, bmsr; + u32 apmc; + + /* + * set up PCI device basics + */ + rc = pci_enable_device(pdev); + if (rc) { + pr_err("Cannot enable PCI device\n"); + goto err_out; + } + + using_dac = jme_pci_dma64(pdev); + if (using_dac < 0) { + pr_err("Cannot set PCI DMA Mask\n"); + rc = -EIO; + goto err_out_disable_pdev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("No PCI resource region found\n"); + rc = -ENOMEM; + goto err_out_disable_pdev; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) { + pr_err("Cannot obtain PCI resource region\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + /* + * alloc and init net device + */ + netdev = alloc_etherdev(sizeof(*jme)); + if (!netdev) { + pr_err("Cannot allocate netdev structure\n"); + rc = -ENOMEM; + goto err_out_release_regions; + } + netdev->netdev_ops = &jme_netdev_ops; + netdev->ethtool_ops = &jme_ethtool_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + netdev->hw_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + netdev->features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + /* + * init adapter info + */ + jme = netdev_priv(netdev); + jme->pdev = pdev; + jme->dev = netdev; + jme->jme_rx = netif_rx; + jme->old_mtu = netdev->mtu = 1500; + jme->phylink = 0; + jme->tx_ring_size = 1 << 10; + jme->tx_ring_mask = jme->tx_ring_size - 1; + jme->tx_wake_threshold = 1 << 9; + jme->rx_ring_size = 1 << 9; + jme->rx_ring_mask = jme->rx_ring_size - 1; + jme->msg_enable = JME_DEF_MSG_ENABLE; + jme->regs = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!(jme->regs)) { + pr_err("Mapping PCI resource region error\n"); + rc = -ENOMEM; + goto err_out_free_netdev; + } + + if (no_pseudohp) { + apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } else if (force_pseudohp) { + apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } + + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) + + spin_lock_init(&jme->phy_lock); + spin_lock_init(&jme->macaddr_lock); + spin_lock_init(&jme->rxmcs_lock); + + atomic_set(&jme->link_changing, 1); + atomic_set(&jme->rx_cleaning, 1); + atomic_set(&jme->tx_cleaning, 1); + atomic_set(&jme->rx_empty, 1); + + tasklet_init(&jme->pcc_task, + jme_pcc_tasklet, + (unsigned long) jme); + tasklet_init(&jme->linkch_task, + jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, + jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, + jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, + jme_rx_empty_tasklet, + (unsigned long) jme); + tasklet_disable_nosync(&jme->linkch_task); + tasklet_disable_nosync(&jme->txclean_task); + tasklet_disable_nosync(&jme->rxclean_task); + tasklet_disable_nosync(&jme->rxempty_task); + jme->dpi.cur = PCC_P1; + + jme->reg_ghc = 0; + jme->reg_rxcs = RXCS_DEFAULT; + jme->reg_rxmcs = RXMCS_DEFAULT; + jme->reg_txpfc = 0; + jme->reg_pmcs = PMCS_MFEN; + jme->reg_gpreg1 = GPREG1_DEFAULT; + + if (jme->reg_rxmcs & RXMCS_CHECKSUM) + netdev->features |= 
NETIF_F_RXCSUM; + + /* + * Get Max Read Req Size from PCI Config Space + */ + pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); + jme->mrrs &= PCI_DCSR_MRRS_MASK; + switch (jme->mrrs) { + case MRRS_128B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; + break; + case MRRS_256B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; + break; + default: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; + break; + } + + /* + * Must check before reset_mac_processor + */ + jme_check_hw_ver(jme); + jme->mii_if.dev = netdev; + if (jme->fpgaver) { + jme->mii_if.phy_id = 0; + for (i = 1 ; i < 32 ; ++i) { + bmcr = jme_mdio_read(netdev, i, MII_BMCR); + bmsr = jme_mdio_read(netdev, i, MII_BMSR); + if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { + jme->mii_if.phy_id = i; + break; + } + } + + if (!jme->mii_if.phy_id) { + rc = -EIO; + pr_err("Can not find phy_id\n"); + goto err_out_unmap; + } + + jme->reg_ghc |= GHC_LINK_POLL; + } else { + jme->mii_if.phy_id = 1; + } + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme->mii_if.supports_gmii = true; + else + jme->mii_if.supports_gmii = false; + jme->mii_if.phy_id_mask = 0x1F; + jme->mii_if.reg_num_mask = 0x1F; + jme->mii_if.mdio_read = jme_mdio_read; + jme->mii_if.mdio_write = jme_mdio_write; + + jme_clear_pm(jme); + pci_set_power_state(jme->pdev, PCI_D0); + device_set_wakeup_enable(&pdev->dev, true); + + jme_set_phyfifo_5level(jme); + jme->pcirev = pdev->revision; + if (!jme->fpgaver) + jme_phy_init(jme); + jme_phy_off(jme); + + /* + * Reset MAC processor and reload EEPROM for MAC Address + */ + jme_reset_mac_processor(jme); + rc = jme_reload_eeprom(jme); + if (rc) { + pr_err("Reload eeprom for reading MAC Address error\n"); + goto err_out_unmap; + } + jme_load_macaddr(netdev); + + /* + * Tell stack that we are not ready to work until open() + */ + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + pr_err("Cannot register net device\n"); + goto err_out_unmap; + } + + netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? + "JMC250 Gigabit Ethernet" : + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? + "JMC260 Fast Ethernet" : "Unknown", + (jme->fpgaver != 0) ? " (FPGA)" : "", + (jme->fpgaver != 0) ? 
jme->fpgaver : jme->chiprev, + jme->pcirev, netdev->dev_addr); + + return 0; + + err_out_unmap: + iounmap(jme->regs); + err_out_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + err_out_release_regions: + pci_release_regions(pdev); + err_out_disable_pdev: + pci_disable_device(pdev); + err_out: + return rc; + } + + static void __devexit + jme_remove_one(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(jme->regs); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + + } + + static void + jme_shutdown(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + jme_powersave_phy(jme); + pci_pme_active(pdev, true); + } + + #ifdef CONFIG_PM_SLEEP + static int + jme_suspend(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + atomic_dec(&jme->link_changing); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + jme_stop_irq(jme); + + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + jme_stop_pcc_timer(jme); + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + netif_carrier_off(netdev); + jme->phylink = 0; + } + + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + jme_powersave_phy(jme); + + return 0; + } + + static int + jme_resume(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + jme_clear_pm(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_start_irq(jme); + netif_device_attach(netdev); + + atomic_inc(&jme->link_changing); + + jme_reset_link(jme); + + return 0; + } + + static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); + #define JME_PM_OPS (&jme_pm_ops) + + #else + + #define JME_PM_OPS NULL + #endif + + static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, + { } + }; + + static struct pci_driver jme_driver = { + .name = DRV_NAME, + .id_table = jme_pci_tbl, + .probe = jme_init_one, + .remove = __devexit_p(jme_remove_one), + .shutdown = jme_shutdown, + .driver.pm = JME_PM_OPS, + }; + + static int __init + jme_init_module(void) + { + pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION); + return pci_register_driver(&jme_driver); + } + + static void __exit + jme_cleanup_module(void) + { + pci_unregister_driver(&jme_driver); + } + + module_init(jme_init_module); + module_exit(jme_cleanup_module); + + MODULE_AUTHOR("Guo-Fu Tseng cooldavid@cooldavid.org"); + MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + MODULE_DEVICE_TABLE(pci, 
jme_pci_tbl); + diff --combined drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0000000,75338eb..90f2cd2 mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@@ -1,0 -1,816 +1,816 @@@ + /* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + + #include <asm/page.h> + #include <linux/mlx4/cq.h> + #include <linux/slab.h> + #include <linux/mlx4/qp.h> + #include <linux/skbuff.h> + #include <linux/if_vlan.h> + #include <linux/vmalloc.h> + #include <linux/tcp.h> + + #include "mlx4_en.h" + + enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + }; + + static int inline_thold __read_mostly = MAX_INLINE; + + module_param_named(inline_thold, inline_thold, int, 0444); + MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); + + int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int qpn, u32 size, + u16 stride) + { + struct mlx4_en_dev *mdev = priv->mdev; + int tmp; + int err; + + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + + inline_thold = min(inline_thold, MAX_INLINE); + + spin_lock_init(&ring->comp_lock); + + tmp = size * sizeof(struct mlx4_en_tx_info); + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + en_err(priv, "Failed allocating tx_info ring\n"); + return -ENOMEM; + } + en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", + ring->tx_info, tmp); + + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + en_err(priv, "Failed allocating bounce buffer\n"); + err = -ENOMEM; + goto err_tx; + } + ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, + 2 * PAGE_SIZE); + if (err) { + en_err(priv, "Failed allocating hwq resources\n"); + goto err_bounce; + } + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map TX buffer\n"); + goto err_hwq_res; + } + + ring->buf = ring->wqres.buf.direct.buf; + + en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " + "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, + ring->buf_size, (unsigned long 
long) ring->wqres.buf.direct.map); + + ring->qpn = qpn; + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); + if (err) { + en_err(priv, "Failed allocating qp %d\n", ring->qpn); + goto err_map; + } + ring->qp.event = mlx4_en_sqp_event; + + err = mlx4_bf_alloc(mdev->dev, &ring->bf); + if (err) { + en_dbg(DRV, priv, "working without blueflame (%d)", err); + ring->bf.uar = &mdev->priv_uar; + ring->bf.uar->map = mdev->uar_map; + ring->bf_enabled = false; + } else + ring->bf_enabled = true; + + return 0; + + err_map: + mlx4_en_unmap_buffer(&ring->wqres.buf); + err_hwq_res: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + err_bounce: + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + err_tx: + vfree(ring->tx_info); + ring->tx_info = NULL; + return err; + } + + void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_dev *mdev = priv->mdev; + en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); + + if (ring->bf_enabled) + mlx4_bf_free(mdev->dev, &ring->bf); + mlx4_qp_remove(mdev->dev, &ring->qp); + mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + vfree(ring->tx_info); + ring->tx_info = NULL; + } + + int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq) + { + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + ring->cqn = cq; + ring->prod = 0; + ring->cons = 0xffffffff; + ring->last_nr_txbb = 1; + ring->poll_cnt = 0; + ring->blocked = 0; + memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); + memset(ring->buf, 0, ring->buf_size); + + ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = swab32(ring->qp.qpn << 8); ++ ring->doorbell_qpn = ring->qp.qpn << 8; + + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, &ring->context); + if (ring->bf_enabled) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, + &ring->qp, &ring->qp_state); + + return err; + } + + void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_dev *mdev = priv->mdev; + + mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); + } + + + static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner) + { + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; + struct sk_buff *skb = tx_info->skb; + struct skb_frag_struct *frag; + void *end = ring->buf + ring->buf_size; + int frags = skb_shinfo(skb)->nr_frags; + int i; + __be32 *ptr = (__be32 *)tx_desc; + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + if (!tx_info->inl) { + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) 
be64_to_cpu(data[i].addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + + } else { + if (!tx_info->inl) { + if ((void *) data >= end) { + data = ring->buf + ((void *)data - end); + } + + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + /* Check for wraparound before unmapping */ + if ((void *) data >= end) + data = ring->buf; + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + ++data; + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *) ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + + } + dev_kfree_skb_any(skb); + return tx_info->nr_txbb; + } + + + int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + int cnt = 0; + + /* Skip last polled descriptor */ + ring->cons += ring->last_nr_txbb; + en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", + ring->cons, ring->prod); + + if ((u32) (ring->prod - ring->cons) > ring->size) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Tx consumer passed producer!\n"); + return 0; + } + + while (ring->cons != ring->prod) { + ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->cons & ring->size_mask, + !!(ring->cons & ring->size)); + ring->cons += ring->last_nr_txbb; + cnt++; + } + + if (cnt) + en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); + + return cnt; + } + + + static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_cq *mcq = &cq->mcq; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + struct mlx4_cqe *cqe = cq->buf; + u16 index; + u16 new_index; + u32 txbbs_skipped = 0; + u32 cq_last_sav; + + /* index always points to the first TXBB of the last polled descriptor */ + index = ring->cons & ring->size_mask; + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + if (index == new_index) + return; + + if (!priv->port_up) + return; + + /* + * We use a two-stage loop: + * - the first samples the HW-updated CQE + * - the second frees TXBBs until the last sample + * This lets us amortize CQE cache misses, while still polling the CQ + * until is quiescent. + */ + cq_last_sav = mcq->cons_index; + do { + do { + /* Skip over last polled CQE */ + index = (index + ring->last_nr_txbb) & ring->size_mask; + txbbs_skipped += ring->last_nr_txbb; + + /* Poll next CQE */ + ring->last_nr_txbb = mlx4_en_free_tx_desc( + priv, ring, index, + !!((ring->cons + txbbs_skipped) & + ring->size)); + ++mcq->cons_index; + + } while (index != new_index); + + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + } while (index != new_index); + AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, + (u32) (mcq->cons_index - cq_last_sav)); + + /* + * To prevent CQ overflow we first update CQ consumer and only then + * the ring consumer. 
+ */ + mlx4_cq_set_ci(mcq); + wmb(); + ring->cons += txbbs_skipped; + + /* Wakeup Tx queue if this ring stopped it */ + if (unlikely(ring->blocked)) { + if ((u32) (ring->prod - ring->cons) <= + ring->size - HEADROOM - MAX_DESC_TXBBS) { + ring->blocked = 0; + netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); + priv->port_stats.wake_queue++; + } + } + } + + void mlx4_en_tx_irq(struct mlx4_cq *mcq) + { + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + + if (!spin_trylock(&ring->comp_lock)) + return; + mlx4_en_process_tx_cq(cq->dev, cq); + mod_timer(&cq->timer, jiffies + 1); + spin_unlock(&ring->comp_lock); + } + + + void mlx4_en_poll_tx_cq(unsigned long data) + { + struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + u32 inflight; + + INC_PERF_COUNTER(priv->pstats.tx_poll); + + if (!spin_trylock_irq(&ring->comp_lock)) { + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + return; + } + mlx4_en_process_tx_cq(cq->dev, cq); + inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); + + /* If there are still packets in flight and the timer has not already + * been scheduled by the Tx routine then schedule it here to guarantee + * completion processing of these packets */ + if (inflight && priv->port_up) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + spin_unlock_irq(&ring->comp_lock); + } + + static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + u32 index, + unsigned int desc_size) + { + u32 copy = (ring->size - index) * TXBB_SIZE; + int i; + + for (i = desc_size - copy - 4; i >= 0; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + i)) = + *((u32 *) (ring->bounce_buf + copy + i)); + } + + for (i = copy - 4; i >= 4 ; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = + *((u32 *) (ring->bounce_buf + i)); + } + + /* Return real descriptor location */ + return ring->buf + index * TXBB_SIZE; + } + + static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) + { + struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; + unsigned long flags; + + /* If we don't have a pending timer, set one up to catch our recent + post in case the interface becomes idle */ + if (!timer_pending(&cq->timer)) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ + if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) + if (spin_trylock_irqsave(&ring->comp_lock, flags)) { + mlx4_en_process_tx_cq(priv->dev, cq); + spin_unlock_irqrestore(&ring->comp_lock, flags); + } + } + + static int is_inline(struct sk_buff *skb, void **pfrag) + { + void *ptr; + + if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { + if (skb_shinfo(skb)->nr_frags == 1) { + ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]); + if (unlikely(!ptr)) + return 0; + + if (pfrag) + *pfrag = ptr; + + return 1; + } else if (unlikely(skb_shinfo(skb)->nr_frags)) + return 0; + else + return 1; + } + + return 0; + } + + static int inline_size(struct sk_buff *skb) + { + if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) + <= MLX4_INLINE_ALIGN) + return ALIGN(skb->len + CTRL_SIZE + + 
sizeof(struct mlx4_wqe_inline_seg), 16); + else + return ALIGN(skb->len + CTRL_SIZE + 2 * + sizeof(struct mlx4_wqe_inline_seg), 16); + } + + static int get_real_size(struct sk_buff *skb, struct net_device *dev, + int *lso_header_size) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + int real_size; + + if (skb_is_gso(skb)) { + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + + ALIGN(*lso_header_size + 4, DS_SIZE); + if (unlikely(*lso_header_size != skb_headlen(skb))) { + /* We add a segment for the skb linear buffer only if + * it contains data */ + if (*lso_header_size < skb_headlen(skb)) + real_size += DS_SIZE; + else { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Non-linear headers\n"); + return 0; + } + } + } else { + *lso_header_size = 0; + if (!is_inline(skb, NULL)) + real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; + else + real_size = inline_size(skb); + } + + return real_size; + } + + static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, + int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) + { + struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + + if (skb->len <= spc) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, + skb_frag_size(&skb_shinfo(skb)->frags[0])); + + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + if (skb_headlen(skb) <= spc) { + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_headlen(skb) < spc) { + memcpy(((void *)(inl + 1)) + skb_headlen(skb), + fragptr, spc - skb_headlen(skb)); + fragptr += spc - skb_headlen(skb); + } + inl = (void *) (inl + 1) + spc; + memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); + } else { + skb_copy_from_linear_data(skb, inl + 1, spc); + inl = (void *) (inl + 1) + spc; + skb_copy_from_linear_data_offset(skb, spc, inl + 1, + skb_headlen(skb) - spc); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, + fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); + } + + wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); + } + tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; + } + + u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 vlan_tag = 0; + + /* If we support per priority flow control and the packet contains + * a vlan tag, send the packet to the TX ring assigned to that priority + */ + if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); + } + + return skb_tx_hash(dev, skb); + } + + static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) + { + __iowrite64_copy(dst, src, bytecnt / 8); + } + + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_cq *cq; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct skb_frag_struct *frag; + struct mlx4_en_tx_info *tx_info; + struct ethhdr *ethh; + u64 mac; + u32 mac_l, mac_h; + int 
tx_ind = 0; + int nr_txbb; + int desc_size; + int real_size; + dma_addr_t dma; + u32 index, bf_index; + __be32 op_own; + u16 vlan_tag = 0; + int i; + int lso_header_size; + void *fragptr; + bool bounce = false; + + if (!priv->port_up) + goto tx_drop; + + real_size = get_real_size(skb, dev, &lso_header_size); + if (unlikely(!real_size)) + goto tx_drop; + + /* Align descriptor to TXBB size */ + desc_size = ALIGN(real_size, TXBB_SIZE); + nr_txbb = desc_size / TXBB_SIZE; + if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Oversized header or SG list\n"); + goto tx_drop; + } + + tx_ind = skb->queue_mapping; + ring = &priv->tx_ring[tx_ind]; + if (vlan_tx_tag_present(skb)) + vlan_tag = vlan_tx_tag_get(skb); + + /* Check available TXBBs And 2K spare for prefetch */ + if (unlikely(((int)(ring->prod - ring->cons)) > + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + /* every full Tx ring stops queue */ + netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); + ring->blocked = 1; + priv->port_stats.queue_stopped++; + + /* Use interrupts to find out when queue opened */ + cq = &priv->tx_cq[tx_ind]; + mlx4_en_arm_cq(priv, cq); + return NETDEV_TX_BUSY; + } + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32) (ring->prod - ring->cons - 1)); + + /* Packet is good - grab an index and transmit it */ + index = ring->prod & ring->size_mask; + bf_index = ring->prod; + + /* See if we have enough space for whole descriptor TXBB for setting + * SW ownership on next descriptor; if not, use a bounce buffer. */ + if (likely(index + nr_txbb <= ring->size)) + tx_desc = ring->buf + index * TXBB_SIZE; + else { + tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; + bounce = true; + } + + /* Save skb in tx_info ring */ + tx_info = &ring->tx_info[index]; + tx_info->skb = skb; + tx_info->nr_txbb = nr_txbb; + + /* Prepare ctrl segement apart opcode+ownership, which depends on + * whether LSO is used */ + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; + tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + ring->tx_csum++; + } + + if (unlikely(priv->validate_loopback)) { + /* Copy dst mac address to wqe */ + skb_reset_mac_header(skb); + ethh = eth_hdr(skb); + if (ethh && ethh->h_dest) { + mac = mlx4_en_mac_to_u64(ethh->h_dest); + mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); + mac_l = (u32) (mac & 0xffffffff); + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); + tx_desc->ctrl.imm = cpu_to_be32(mac_l); + } + } + + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + /* Mark opcode as LSO */ + op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | + ((ring->prod & ring->size) ? 
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + /* Fill in the LSO prefix */ + tx_desc->lso.mss_hdr_size = cpu_to_be32( + skb_shinfo(skb)->gso_size << 16 | lso_header_size); + + /* Copy headers; + * note that we already verified that it is linear */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + data = ((void *) &tx_desc->lso + + ALIGN(lso_header_size + 4, DS_SIZE)); + + priv->port_stats.tso_packets++; + i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + + !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); + ring->bytes += skb->len + (i - 1) * lso_header_size; + ring->packets += i; + } else { + /* Normal (Non LSO) packet */ + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + data = &tx_desc->data; + ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); + ring->packets++; + + } + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); + + + /* valid only for none inline segments */ + tx_info->data_offset = (void *) data - (void *) tx_desc; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; + data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + + if (!is_inline(skb, &fragptr)) { + /* Map fragments */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + frag = &skb_shinfo(skb)->frags[i]; + dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_frag_size(frag)); + --data; + } + + /* Map linear part */ + if (tx_info->linear) { + dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, + skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); + } + tx_info->inl = 0; + } else { + build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); + tx_info->inl = 1; + } + + ring->prod += nr_txbb; + + /* If we used a bounce buffer then copy descriptor back into place */ + if (bounce) + tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); + + /* Run destructor before passing skb to HW */ + if (likely(!skb_shared(skb))) + skb_orphan(skb); + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { - *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; ++ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descirptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + /* Ensure new descirptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + wmb(); - writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); ++ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + } + + /* Poll CQ here */ + mlx4_en_xmit_poll(priv, tx_ind); + + return NETDEV_TX_OK; + + tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + diff --combined drivers/net/ethernet/mellanox/mlx4/eq.c index 869a2c2,1ad1f60..869a2c2 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/eq.c @@@ -484,7 -484,7 +484,7 @@@ static void mlx4_free_eq(struct mlx4_de
	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
-		pci_free_consistent(dev->pdev, PAGE_SIZE,
+		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
			eq->page_list[i].buf, eq->page_list[i].map);
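The hunk above moves mlx4_free_eq() from the legacy PCI DMA wrapper to the generic DMA API; both calls free the same coherent mapping, with dma_free_coherent() taking the underlying struct device rather than the struct pci_dev. A minimal sketch of the matching allocation side under the same convention (the loop mirrors the page_list fields used above; the unwind label is an assumption):

	int i;

	for (i = 0; i < npages; ++i) {
		/* was: pci_alloc_consistent(dev->pdev, PAGE_SIZE, &map) */
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE,
							  &eq->page_list[i].map,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free;	/* hypothetical unwind label */
	}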
diff --combined drivers/net/ethernet/mellanox/mlx4/fw.c index 0000000,ed452dd..abdfbac mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@@ -1,0 -1,945 +1,951 @@@ + /* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + #include <linux/mlx4/cmd.h> + #include <linux/cache.h> + + #include "fw.h" + #include "icm.h" + + enum { + MLX4_COMMAND_INTERFACE_MIN_REV = 2, + MLX4_COMMAND_INTERFACE_MAX_REV = 3, + MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, + }; + + extern void __buggy_use_of_MLX4_GET(void); + extern void __buggy_use_of_MLX4_PUT(void); + + static int enable_qos; + module_param(enable_qos, bool, 0444); + MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); + + #define MLX4_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + switch (sizeof (dest)) { \ + case 1: (dest) = *(u8 *) __p; break; \ + case 2: (dest) = be16_to_cpup(__p); break; \ + case 4: (dest) = be32_to_cpup(__p); break; \ + case 8: (dest) = be64_to_cpup(__p); break; \ + default: __buggy_use_of_MLX4_GET(); \ + } \ + } while (0) + + #define MLX4_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MLX4_PUT(); \ + } \ + } while (0) + + static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) + { + static const char *fname[] = { + [ 0] = "RC transport", + [ 1] = "UC transport", + [ 2] = "UD transport", + [ 3] = "XRC transport", + [ 4] = "reliable multicast", + [ 5] = "FCoIB support", + [ 6] = "SRQ support", + [ 7] = "IPoIB checksum offload", + [ 8] = "P_Key violation counter", + [ 9] = "Q_Key violation counter", + [10] = "VMM", + [12] = "DPDP", + [15] = "Big LSO headers", + [16] = "MW support", + [17] = "APM support", + [18] = "Atomic ops support", + [19] = "Raw multicast support", + [20] = "Address vector port checking support", + [21] = "UD multicast support", + [24] = "Demand paging support", + [25] = "Router support", + [30] = "IBoE support", + [32] = "Unicast loopback support", + [34] = "FCS header control", + [38] = "Wake On LAN support", + [40] = "UDP RSS support", + [41] = "Unicast VEP steering support", + [42] = "Multicast VEP steering support", + [48] = "Counters support", + }; + int i; + + mlx4_dbg(dev, "DEV_CAP flags:\n"); + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); + } + + int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err = 0; + + #define MOD_STAT_CFG_IN_SIZE 0x100 + + #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 + #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); + + MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); + MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32, flags, ext_flags; + u16 size; + u16 stat_rate; + int err; + int i; + + #define QUERY_DEV_CAP_OUT_SIZE 0x100 + #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 + #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 + #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 + 
#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 + #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 + #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 + #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 + #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 + #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 + #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a + #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b + #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d + #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e + #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f + #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 + #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 + #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 + #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 + #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 + #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 + #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b + #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d + #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f + #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 + #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 + #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 + #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 + #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 + #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b + #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c + #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f + #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 + #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 + #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 + #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 + #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b + #define QUERY_DEV_CAP_BF_OFFSET 0x4c + #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d + #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e + #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f + #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 + #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 + #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 + #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 + #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 + #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 + #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 + #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 + #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 ++#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 ++#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 + #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 + #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 + #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 + #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 + #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 + #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 + #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a + #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c + #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e + #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 + #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 + #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 + #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 + #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); + dev_cap->reserved_qps = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); + dev_cap->max_qps = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); + dev_cap->reserved_srqs = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); + dev_cap->max_srqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); + dev_cap->max_cq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); + dev_cap->reserved_cqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); + dev_cap->max_cqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); + dev_cap->max_mpts = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); + dev_cap->reserved_eqs = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); + dev_cap->max_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); + dev_cap->reserved_mtts = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); + dev_cap->max_mrw_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); + dev_cap->reserved_mrws = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); + dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); + dev_cap->max_requester_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); + dev_cap->max_responder_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); + field &= 0x1f; + if (!field) + dev_cap->max_gso_sz = 0; + else + dev_cap->max_gso_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); + dev_cap->max_rdma_global = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); + dev_cap->local_ca_ack_delay = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); + dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); + dev_cap->stat_rate_support = stat_rate; + MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); + dev_cap->flags = flags | (u64)ext_flags << 32; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); + dev_cap->reserved_uars = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); + dev_cap->uar_size = 1 << ((field & 0x3f) + 20); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); + dev_cap->min_page_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); + if (field & 0x80) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); + dev_cap->bf_reg_size = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) + field = 3; + dev_cap->bf_regs_per_page = 1 << (field & 0x3f); + mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", + dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); + } else { + dev_cap->bf_reg_size = 0; + mlx4_dbg(dev, "BlueFlame not available\n"); + } + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); + dev_cap->max_sq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); + dev_cap->max_sq_desc_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); + dev_cap->max_qp_per_mcg = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); + dev_cap->reserved_mgms = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); + dev_cap->max_mcgs = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); + dev_cap->reserved_pds = field >> 4; + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_MAX_PD_OFFSET); + dev_cap->max_pds = 1 << (field & 0x3f); ++ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); ++ dev_cap->reserved_xrcds = field >> 4; ++ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); ++ dev_cap->max_xrcds = 1 << (field & 0x1f); + + MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); + dev_cap->rdmarc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); + dev_cap->qpc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); + dev_cap->aux_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); + dev_cap->altc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); + dev_cap->eqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); + dev_cap->cqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); + dev_cap->srq_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); + dev_cap->cmpt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); + dev_cap->mtt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); + dev_cap->dmpt_entry_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); + dev_cap->max_srq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); + dev_cap->max_qp_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); + dev_cap->resize_srq = field & 1; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); + dev_cap->max_rq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); + dev_cap->max_rq_desc_sz = size; + + MLX4_GET(dev_cap->bmme_flags, outbox, + QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(dev_cap->reserved_lkey, outbox, + QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(dev_cap->max_icm_sz, outbox, + QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) + MLX4_GET(dev_cap->max_counters, outbox, + QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + for (i = 1; i <= dev_cap->num_ports; ++i) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->max_vl[i] = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + dev_cap->ib_mtu[i] = field >> 4; + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + dev_cap->max_gids[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + } + } else { + #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 + #define QUERY_PORT_MTU_OFFSET 0x01 + #define QUERY_PORT_ETH_MTU_OFFSET 0x02 + #define QUERY_PORT_WIDTH_OFFSET 0x06 + #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 + #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a + #define QUERY_PORT_MAX_VL_OFFSET 0x0b + #define QUERY_PORT_MAC_OFFSET 0x10 + #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 + #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c + #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 + + for (i = 1; i <= dev_cap->num_ports; ++i) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + dev_cap->supported_port_types[i] = field & 3; + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + dev_cap->ib_mtu[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, 
QUERY_PORT_MAX_GID_PKEY_OFFSET); + dev_cap->max_gids[i] = 1 << (field >> 4); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + dev_cap->max_vl[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); + dev_cap->log_max_macs[i] = field & 0xf; + dev_cap->log_max_vlans[i] = field >> 4; + MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); + MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); + MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); + dev_cap->trans_type[i] = field32 >> 24; + dev_cap->vendor_oui[i] = field32 & 0xffffff; + MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); + MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); + } + } + + mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", + dev_cap->bmme_flags, dev_cap->reserved_lkey); + + /* + * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then + * we can't use any EQs whose doorbell falls on that page, + * even if the EQ itself isn't reserved. + */ + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + + mlx4_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_cap->max_icm_sz >> 20); + mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); + mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); + mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); + mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_cap->reserved_mrws, dev_cap->reserved_mtts); + mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); + mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_cap->max_pds, dev_cap->reserved_mgms); + mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); + mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", + dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], + dev_cap->max_port_width[1]); + mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", + dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); + mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", + dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); + mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); + mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); + + dump_dev_cap_flags(dev, dev_cap->flags); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) + { + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + int i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + pages = mailbox->buf; + + for (mlx4_icm_first(icm, &iter); + !mlx4_icm_last(&iter); + mlx4_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. 
+ */ + lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; + if (lg < MLX4_ICM_PAGE_SHIFT) { + mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", + MLX4_ICM_PAGE_SIZE, + (unsigned long long) mlx4_icm_addr(&iter), + mlx4_icm_size(&iter)); + err = -EINVAL; + goto out; + } + + for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cpu_to_be64(virt); + virt += 1 << lg; + } + + pages[nent * 2 + 1] = + cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | + (lg - MLX4_ICM_PAGE_SHIFT)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MLX4_MAILBOX_SIZE / 16) { + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + switch (op) { + case MLX4_CMD_MAP_FA: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM_AUX: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM: + mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", + tc, ts, (unsigned long long) virt - (ts << 10)); + break; + } + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) + { + return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); + } + + int mlx4_UNMAP_FA(struct mlx4_dev *dev) + { + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); + } + + + int mlx4_RUN_FW(struct mlx4_dev *dev) + { + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); + } + + int mlx4_QUERY_FW(struct mlx4_dev *dev) + { + struct mlx4_fw *fw = &mlx4_priv(dev)->fw; + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err = 0; + u64 fw_ver; + u16 cmd_if_rev; + u8 lg; + + #define QUERY_FW_OUT_SIZE 0x100 + #define QUERY_FW_VER_OFFSET 0x00 + #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a + #define QUERY_FW_MAX_CMD_OFFSET 0x0f + #define QUERY_FW_ERR_START_OFFSET 0x30 + #define QUERY_FW_ERR_SIZE_OFFSET 0x38 + #define QUERY_FW_ERR_BAR_OFFSET 0x3c + + #define QUERY_FW_SIZE_OFFSET 0x00 + #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 + #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subminor version is at more significant bits than minor + * version, so swap here. 
+ */ + dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | + ((fw_ver & 0xffff0000ull) >> 16) | + ((fw_ver & 0x0000ffffull) << 16); + + MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || + cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { + mlx4_err(dev, "Installed FW has unsupported " + "command interface revision %d.\n", + cmd_if_rev); + mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff); + mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", + MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); + err = -ENODEV; + goto out; + } + + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) + dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; + + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + cmd->max_cmds = 1 << lg; + + mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff, + cmd_if_rev, cmd->max_cmds); + + MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); + MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); + fw->catas_bar = (fw->catas_bar >> 6) * 2; + + mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", + (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); + + MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); + fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + + mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + fw->fw_pages = + ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", + (unsigned long long) fw->clr_int_base, fw->clr_int_bar); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + static void get_board_id(void *vsd, char *board_id) + { + int i; + + #define VSD_OFFSET_SIG1 0x00 + #define VSD_OFFSET_SIG2 0xde + #define VSD_OFFSET_MLX_BOARD_ID 0xd0 + #define VSD_OFFSET_TS_BOARD_ID 0x20 + + #define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MLX4_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } + } + + int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err; + + #define QUERY_ADAPTER_OUT_SIZE 0x100 + #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 + #define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) + { + struct mlx4_cmd_mailbox *mailbox; + __be32 *inbox; + int err; + + #define INIT_HCA_IN_SIZE 0x200 + #define INIT_HCA_VERSION_OFFSET 0x000 + #define INIT_HCA_VERSION 2 + #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e + #define INIT_HCA_FLAGS_OFFSET 0x014 + #define INIT_HCA_QPC_OFFSET 0x020 + #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) + #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) + #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) + #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) + #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) + #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) + #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) + #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) + #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) + #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) + #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) + #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) + #define INIT_HCA_MCAST_OFFSET 0x0c0 + #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) + #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) + #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) + #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) + #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) + #define INIT_HCA_TPT_OFFSET 0x0f0 + #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) + #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) + #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) + #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) + #define INIT_HCA_UAR_OFFSET 0x120 + #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) + #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_HCA_IN_SIZE); + + *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; + + *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = + (ilog2(cache_line_size()) - 4) << 5; + + #if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); + #elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); + #else + #error Host endianness not defined + #endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* Enable IPoIB checksumming if we 
can: */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); + + /* Enable QoS support if module parameter set */ + if (enable_qos) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); + + /* enable counters */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); + + /* multicast attributes */ + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000); + + if (err) + mlx4_err(dev, "INIT_HCA returns %d\n", err); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + u16 field; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + #define INIT_PORT_IN_SIZE 256 + #define INIT_PORT_FLAGS_OFFSET 0x00 + #define INIT_PORT_FLAG_SIG (1 << 18) + #define INIT_PORT_FLAG_NG (1 << 17) + #define INIT_PORT_FLAG_G0 (1 << 16) + #define INIT_PORT_VL_SHIFT 4 + #define INIT_PORT_PORT_WIDTH_SHIFT 8 + #define INIT_PORT_MTU_OFFSET 0x04 + #define INIT_PORT_MAX_GID_OFFSET 0x06 + #define INIT_PORT_MAX_PKEY_OFFSET 0x0a + #define INIT_PORT_GUID0_OFFSET 0x10 + #define INIT_PORT_NODE_GUID_OFFSET 0x18 + #define INIT_PORT_SI_GUID_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_PORT_IN_SIZE); + + flags = 0; + flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + + field = 128 << dev->caps.ib_mtu_cap[port]; + MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); + field = dev->caps.gid_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); + field = 
dev->caps.pkey_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); + + mlx4_free_cmd_mailbox(dev, mailbox); + } else + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); + + return err; + } + EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); + + int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) + { + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); + } + EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); + + int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) + { + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); + } + + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) + { + int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, + MLX4_CMD_SET_ICM_SIZE, + MLX4_CMD_TIME_CLASS_A); + if (ret) + return ret; + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + return 0; + } + + int mlx4_NOP(struct mlx4_dev *dev) + { + /* Input modifier of 0x1f means "finish as soon as possible." */ + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); + } + + #define MLX4_WOL_SETUP_MODE (5 << 28) + int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) + { + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); + } + EXPORT_SYMBOL_GPL(mlx4_wol_read); + + int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) + { + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A); + } + EXPORT_SYMBOL_GPL(mlx4_wol_write); diff --combined drivers/net/ethernet/mellanox/mlx4/fw.h index bf5ec22,1e8ecc3..bf5ec22 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@@ -93,8 -93,6 +93,8 @@@ struct mlx4_dev_cap int max_mcgs; int reserved_pds; int max_pds; + int reserved_xrcds; + int max_xrcds; int qpc_entry_sz; int rdmarc_entry_sz; int altc_entry_sz; diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index 660b691,f0ee35d..660b691 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -220,10 -220,6 +220,10 @@@ static int mlx4_dev_cap(struct mlx4_de dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; + dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->reserved_xrcds : 0; + dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->max_xrcds : 0; dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); @@@ -916,18 -912,11 +916,18 @@@ static int mlx4_setup_hca(struct mlx4_d goto err_kar_unmap; }
+ err = mlx4_init_xrcd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "reliable connection domain table, aborting.\n"); + goto err_pd_table_free; + } + err = mlx4_init_mr_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "memory region table, aborting.\n"); - goto err_pd_table_free; + goto err_xrcd_table_free; }
err = mlx4_init_eq_table(dev); @@@ -1044,9 -1033,6 +1044,9 @@@ err_eq_table_free err_mr_table_free: mlx4_cleanup_mr_table(dev);
+err_xrcd_table_free: + mlx4_cleanup_xrcd_table(dev); + err_pd_table_free: mlx4_cleanup_pd_table(dev);
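With the XRC domain table slotted between the PD and MR tables in mlx4_setup_hca(), the unwind chain gains a matching label: each init failure jumps to the cleanup of everything initialized before it, in reverse order. Reduced to the three tables touched by these hunks (labels as in the diff; the body of the final label is an assumption taken from the remove path, and the error messages are omitted):

	err = mlx4_init_pd_table(dev);
	if (err)
		goto err_kar_unmap;

	err = mlx4_init_xrcd_table(dev);
	if (err)
		goto err_pd_table_free;

	err = mlx4_init_mr_table(dev);
	if (err)
		goto err_xrcd_table_free;

	return 0;

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);
err_pd_table_free:
	mlx4_cleanup_pd_table(dev);
err_kar_unmap:
	iounmap(priv->kar);	/* assumed body of the earlier label */
	return err;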
@@@ -1369,7 -1355,6 +1369,7 @@@ err_port mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev);
@@@ -1431,7 -1416,6 +1431,7 @@@ static void mlx4_remove_one(struct pci_ mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev);
iounmap(priv->kar); diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index fee2d05,a2fcd84..fee2d05 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -335,7 -335,6 +335,7 @@@ struct mlx4_priv struct mlx4_cmd cmd;
struct mlx4_bitmap pd_bitmap; + struct mlx4_bitmap xrcd_bitmap; struct mlx4_uar_table uar_table; struct mlx4_mr_table mr_table; struct mlx4_cq_table cq_table; @@@ -385,7 -384,6 +385,7 @@@ int mlx4_alloc_eq_table(struct mlx4_de void mlx4_free_eq_table(struct mlx4_dev *dev);
int mlx4_init_pd_table(struct mlx4_dev *dev); +int mlx4_init_xrcd_table(struct mlx4_dev *dev); int mlx4_init_uar_table(struct mlx4_dev *dev); int mlx4_init_mr_table(struct mlx4_dev *dev); int mlx4_init_eq_table(struct mlx4_dev *dev); @@@ -395,7 -393,6 +395,7 @@@ int mlx4_init_srq_table(struct mlx4_de int mlx4_init_mcg_table(struct mlx4_dev *dev);
void mlx4_cleanup_pd_table(struct mlx4_dev *dev); +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); void mlx4_cleanup_uar_table(struct mlx4_dev *dev); void mlx4_cleanup_mr_table(struct mlx4_dev *dev); void mlx4_cleanup_eq_table(struct mlx4_dev *dev); diff --combined drivers/net/ethernet/mellanox/mlx4/mr.c index ab639cf,9c188bd..ab639cf --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@@ -139,7 -139,7 +139,7 @@@ static int mlx4_buddy_init(struct mlx4_
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), GFP_KERNEL); - buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), + buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; diff --combined drivers/net/ethernet/mellanox/mlx4/pd.c index 3736163,1286b88..3736163 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@@ -61,24 -61,6 +61,24 @@@ void mlx4_pd_free(struct mlx4_dev *dev } EXPORT_SYMBOL_GPL(mlx4_pd_free);
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); + if (*xrcdn == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); + +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_free); + int mlx4_init_pd_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@@ -92,18 -74,6 +92,18 @@@ void mlx4_cleanup_pd_table(struct mlx4_ mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); }
+int mlx4_init_xrcd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), + (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); +} + +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); +}
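The new alloc/free pair is a thin wrapper around the driver's generic bitmap allocator: the table holds 1 << 16 entries, the first reserved_xrcds + 1 of which stay reserved, and mlx4_bitmap_alloc() returning (u32) -1 is mapped to -ENOMEM. A hypothetical consumer (for example an XRC domain allocation path in an upper-layer driver) would use it as:

	u32 xrcdn;
	int err;

	err = mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;	/* -ENOMEM once the bitmap is exhausted */

	/* ... program xrcdn into XRC SRQ/QP contexts ... */

	mlx4_xrcd_free(dev, xrcdn);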
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) { diff --combined drivers/net/ethernet/mellanox/mlx4/port.c index 0000000,163a314..836338a mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@@ -1,0 -1,488 +1,492 @@@ + /* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + #include <linux/errno.h> + #include <linux/if_ether.h> + + #include <linux/mlx4/cmd.h> + + #include "mlx4.h" + + #define MLX4_MAC_VALID (1ull << 63) + #define MLX4_MAC_MASK 0xffffffffffffULL + + #define MLX4_VLAN_VALID (1u << 31) + #define MLX4_VLAN_MASK 0xfff + + void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) + { + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = 1 << dev->caps.log_num_macs; + table->total = 0; + } + + void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) + { + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; + table->total = 0; + } + + static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) + { + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, + u64 mac, int *qpn, u8 reserve) + { + struct mlx4_qp qp; + u8 gid[16] = {0}; + int err; + + if (reserve) { + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + if (err) { + mlx4_err(dev, "Failed to reserve qp for mac registration\n"); + return err; + } + } + qp.qpn = *qpn; + + mac &= 0xffffffffffffULL; + mac = cpu_to_be64(mac << 16); + memcpy(&gid[10], &mac, ETH_ALEN); + gid[5] = port; + gid[7] = MLX4_UC_STEER 
<< 1; + + err = mlx4_qp_attach_common(dev, &qp, gid, 0, + MLX4_PROT_ETH, MLX4_UC_STEER); + if (err && reserve) + mlx4_qp_release_range(dev, *qpn, 1); + + return err; + } + + static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, + u64 mac, int qpn, u8 free) + { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = qpn; + mac &= 0xffffffffffffULL; + mac = cpu_to_be64(mac << 16); + memcpy(&gid[10], &mac, ETH_ALEN); + gid[5] = port; + gid[7] = MLX4_UC_STEER << 1; + + mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); + if (free) + mlx4_qp_release_range(dev, qpn, 1); + } + + int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + struct mlx4_mac_entry *entry; + int i, err = 0; + int free = -1; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); - if (!err) { - entry = kmalloc(sizeof *entry, GFP_KERNEL); - if (!entry) { - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return -ENOMEM; - } - entry->mac = mac; - err = radix_tree_insert(&info->mac_tree, *qpn, entry); - if (err) { - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return err; - } - } else ++ if (err) + return err; ++ ++ entry = kmalloc(sizeof *entry, GFP_KERNEL); ++ if (!entry) { ++ mlx4_uc_steer_release(dev, port, mac, *qpn, 1); ++ return -ENOMEM; ++ } ++ ++ entry->mac = mac; ++ err = radix_tree_insert(&info->mac_tree, *qpn, entry); ++ if (err) { ++ kfree(entry); ++ mlx4_uc_steer_release(dev, port, mac, *qpn, 1); ++ return err; ++ } + } ++ + mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); ++ + mutex_lock(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { + if (free < 0 && !table->refs[i]) { + free = i; + continue; + } + + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + /* MAC already registered, increase references count */ + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + mlx4_dbg(dev, "Free MAC index is %d\n", free); + + if (table->total == table->max) { + /* No free mac entries */ + err = -ENOSPC; + goto out; + } + + /* Register new MAC */ + table->refs[free] = 1; + table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) + *qpn = info->base_qpn + free; + ++table->total; + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_register_mac); + + static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) + { + int err = 0; + + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; + } + return err; + } + + static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) + { + int i; + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; + } + + void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - 
info->base_qpn; + struct mlx4_mac_entry *entry; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (entry) { + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); + radix_tree_delete(&info->mac_tree, qpn); + index = find_index(dev, table, entry->mac); + kfree(entry); + } + } + + mutex_lock(&table->mutex); + + if (validate_index(dev, table, index)) + goto out; + + /* Check whether this address has reference count */ + if (!(--table->refs[index])) { + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; + } + out: + mutex_unlock(&table->mutex); + } + EXPORT_SYMBOL_GPL(mlx4_unregister_mac); + + int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - info->base_qpn; + struct mlx4_mac_entry *entry; + int err; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (!entry) + return -EINVAL; + index = find_index(dev, table, entry->mac); + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); + entry->mac = new_mac; + err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); + if (err || index < 0) + return err; + } + + mutex_lock(&table->mutex); + + err = validate_index(dev, table, index); + if (err) + goto out; + + table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); + table->entries[index] = 0; + } + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_replace_mac); + static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, + __be32 *entries) + { + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); + in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; + } + + int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i; + + for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { + if (table->refs[i] && + (vid == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* VLAN already registered, increase reference count */ + *idx = i; + return 0; + } + } + + return -ENOENT; + } + EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); + + int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i, err = 0; + int free = -1; + + mutex_lock(&table->mutex); + + if (table->total == table->max) { + /* No free vlan entries */ + err = -ENOSPC; + goto out; + } + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if (free < 0 && (table->refs[i] == 0)) { + free = i; + continue; + } + + if (table->refs[i] && + (vlan == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* Vlan already registered, increase references count */ + *index = i; + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + /* Register new MAC */ + table->refs[free] = 1; + 
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + *index = free; + ++table->total; + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_register_vlan); + + void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + + if (index < MLX4_VLAN_REGULAR) { + mlx4_warn(dev, "Trying to free special vlan index %d\n", index); + return; + } + + mutex_lock(&table->mutex); + if (!table->refs[index]) { + mlx4_warn(dev, "No vlan entry for index %d\n", index); + goto out; + } + if (--table->refs[index]) { + mlx4_dbg(dev, "Have more references for index %d," + "no need to modify vlan table\n", index); + goto out; + } + table->entries[index] = 0; + mlx4_set_port_vlan_table(dev, port, table->entries); + --table->total; + out: + mutex_unlock(&table->mutex); + } + EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); + + int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) + { + struct mlx4_cmd_mailbox *inmailbox, *outmailbox; + u8 *inbuf, *outbuf; + int err; + + inmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + + outmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outmailbox)) { + mlx4_free_cmd_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + inbuf = inmailbox->buf; + outbuf = outmailbox->buf; + memset(inbuf, 0, 256); + memset(outbuf, 0, 256); + inbuf[0] = 1; + inbuf[1] = 1; + inbuf[2] = 1; + inbuf[3] = 1; + *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); + *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); + + err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + if (!err) + *caps = *(__be32 *) (outbuf + 84); + mlx4_free_cmd_mailbox(dev, inmailbox); + mlx4_free_cmd_mailbox(dev, outmailbox); + return err; + } + + int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) + { + struct mlx4_cmd_mailbox *mailbox; + int err; + + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memset(mailbox->buf, 0, 256); + + ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } diff --combined drivers/net/ethernet/mellanox/mlx4/qp.c index 51c5389,ec9350e..51c5389 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@@ -280,9 -280,6 +280,9 @@@ int mlx4_init_qp_table(struct mlx4_dev * We reserve 2 extra QPs per port for the special QPs. The * block of special QPs must be aligned to a multiple of 8, so * round up. + * + * We also reserve the MSB of the 24-bit QP number to indicate + * that a QP is an XRC QP. 
*/ dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); diff --combined drivers/net/ethernet/mellanox/mlx4/srq.c index a20b141,3b07b80..a20b141 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@@ -40,20 -40,20 +40,20 @@@ struct mlx4_srq_context { __be32 state_logsize_srqn; u8 logstride; - u8 reserved1[3]; - u8 pg_offset; - u8 reserved2[3]; - u32 reserved3; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; u8 log_page_size; - u8 reserved4[2]; + u8 reserved3[2]; u8 mtt_base_addr_h; __be32 mtt_base_addr_l; __be32 pd; __be16 limit_watermark; __be16 wqe_cnt; - u16 reserved5; + u16 reserved4; __be16 wqe_counter; - u32 reserved6; + u32 reserved5; __be64 db_rec_addr; };
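The mlx4_srq_context reshuffle above carves the 16-bit XRC domain out of formerly reserved bytes and folds the CQN into the old pg_offset word, so the hardware-visible layout must keep its size. Summing the fields gives 48 (0x30) bytes both before and after the change; a compile-time guard along these lines would catch any future slip (the 0x30 figure is derived here from the struct, not quoted from a firmware spec):

	BUILD_BUG_ON(sizeof(struct mlx4_srq_context) != 0x30);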
@@@ -109,8 -109,8 +109,8 @@@ static int mlx4_QUERY_SRQ(struct mlx4_d MLX4_CMD_TIME_CLASS_A); }
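The hunk below widens mlx4_srq_alloc() to carry the CQN and XRC domain through to the context. A hypothetical call site: a conventional SRQ would presumably pass zero for both new arguments, while an XRC SRQ supplies its completion CQN and xrcdn:

	/* non-XRC SRQ: no CQN binding, default XRC domain */
	err = mlx4_srq_alloc(dev, pdn, 0, 0, &mtt, db_rec, &srq);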
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, - u64 db_rec, struct mlx4_srq *srq) +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; struct mlx4_cmd_mailbox *mailbox; @@@ -148,8 -148,6 +148,8 @@@ srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | srq->srqn); srq_context->logstride = srq->wqe_shift - 4; + srq_context->xrcd = cpu_to_be16(xrcd); + srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt); diff --combined drivers/net/ethernet/realtek/r8169.c index 0000000,aa39e77..92b45f0 mode 000000,100644..100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -1,0 -1,6219 +1,6243 @@@ + /* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen shuchen@realtek.com.tw + * Copyright (c) 2003 - 2007 Francois Romieu romieu@fr.zoreil.com + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + + #include <linux/module.h> + #include <linux/moduleparam.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/delay.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/if_vlan.h> + #include <linux/crc32.h> + #include <linux/in.h> + #include <linux/ip.h> + #include <linux/tcp.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/dma-mapping.h> + #include <linux/pm_runtime.h> + #include <linux/firmware.h> + #include <linux/pci-aspm.h> + #include <linux/prefetch.h> + + #include <asm/system.h> + #include <asm/io.h> + #include <asm/irq.h> + + #define RTL8169_VERSION "2.3LK-NAPI" + #define MODULENAME "r8169" + #define PFX MODULENAME ": " + + #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" + #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" + #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" + #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" + #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" + #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" + #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" + #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" + + #ifdef RTL8169_DEBUG + #define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } + #define dprintk(fmt, args...) \ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) + #else + #define assert(expr) do {} while (0) + #define dprintk(fmt, args...) do {} while (0) + #endif /* RTL8169_DEBUG */ + + #define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + + #define TX_BUFFS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) + + /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ + static const int multicast_filter_limit = 32; + + /* MAC address length */ + #define MAC_ADDR_LEN 6 + + #define MAX_READ_REQUEST_SHIFT 12 + #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ + #define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ + #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + + #define R8169_REGS_SIZE 256 + #define R8169_NAPI_WEIGHT 64 + #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ + #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ + #define RX_BUF_SIZE 1536 /* Rx Buffer size */ + #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) + #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + + #define RTL8169_TX_TIMEOUT (6*HZ) + #define RTL8169_PHY_TIMEOUT (10*HZ) + + #define RTL_EEPROM_SIG cpu_to_le32(0x8129) + #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) + #define RTL_EEPROM_SIG_ADDR 0x0000 + + /* write/read MMIO register */ + #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) + #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) + #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) + #define RTL_R8(reg) readb (ioaddr + (reg)) + #define RTL_R16(reg) readw (ioaddr + (reg)) + #define RTL_R32(reg) readl (ioaddr + (reg)) + + enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_NONE = 0xff, + }; + + enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, + }; + + #define JUMBO_1K ETH_DATA_LEN + #define JUMBO_4K (4*1024 - ETH_HLEN - 2) + #define JUMBO_6K (6*1024 - ETH_HLEN - 2) + #define JUMBO_7K (7*1024 - ETH_HLEN - 2) + #define JUMBO_9K (9*1024 - ETH_HLEN - 2) + + #define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ + } + + static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; + } rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), + }; + #undef _R + + enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 + }; + + static void rtl_hw_start_8169(struct net_device *); + static void rtl_hw_start_8168(struct net_device *); + static void rtl_hw_start_8101(struct net_device *); + + static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 
0x2410, 0, 0, RTL_CFG_2 }, + {0,}, + }; + + MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + + static int rx_buf_sz = 16383; + static int use_dac; + static struct { + u32 msg_enable; + } debug = { -1 }; + + enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, + #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ + #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, + #define RX128_INT_EN (1 << 15) /* 8111c and later */ + #define RX_MULTI_EN (1 << 14) /* 8111c only */ + #define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ + #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) + #define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ + #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + + #define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + + #define TxPacketMax (8064 >> 7) + #define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + FuncForceEvent = 0xfc, + }; + + enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, + }; + + enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, + #define CSIAR_FLAG 0x80000000 + #define CSIAR_WRITE_CMD 0x80000000 + #define CSIAR_BYTE_ENABLE 0x0f + #define CSIAR_BYTE_ENABLE_SHIFT 12 + #define CSIAR_ADDR_MASK 0x0fff + PMCH = 0x6f, + EPHYAR = 0x80, + #define EPHYAR_FLAG 0x80000000 + #define EPHYAR_WRITE_CMD 0x80000000 + #define EPHYAR_REG_MASK 0x1f + #define EPHYAR_REG_SHIFT 16 + #define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, + #define PFM_EN (1 << 6) + DBG_REG = 0xd1, + #define FIX_NAK_1 (1 << 4) + #define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, + #define NOW_IS_OOB (1 << 7) + #define EN_NDP (1 << 3) + #define EN_OOB_RESET (1 << 2) + EFUSEAR = 0xdc, + #define EFUSEAR_FLAG 0x80000000 + #define EFUSEAR_WRITE_CMD 0x80000000 + #define EFUSEAR_READ_CMD 0x00000000 + #define EFUSEAR_REG_MASK 0x03ff + #define EFUSEAR_REG_SHIFT 8 + #define EFUSEAR_DATA_MASK 0xff + }; + + enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, + #define ERIAR_FLAG 0x80000000 + #define ERIAR_WRITE_CMD 0x80000000 + #define ERIAR_READ_CMD 0x00000000 + #define ERIAR_ADDR_BYTE_ALIGN 4 + #define ERIAR_TYPE_SHIFT 16 + #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) + #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) + #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) + #define ERIAR_MASK_SHIFT 12 + #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) + #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) + #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ + #define OCPDR_WRITE_CMD 0x80000000 + #define OCPDR_READ_CMD 0x00000000 + #define OCPDR_REG_MASK 0x7f + #define OCPDR_GPHY_REG_SHIFT 16 + #define OCPDR_DATA_MASK 0xffff + 
OCPAR = 0xb4, + #define OCPAR_FLAG 0x80000000 + #define OCPAR_GPHY_WRITE_CMD 0x8000f060 + #define OCPAR_GPHY_READ_CMD 0x0000f060 + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ + #define TXPLA_RST (1 << 29) + #define PWM_EN (1 << 22) + }; + + enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, + #define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* DumpCounterCommand */ + CounterDump = 0x8, + }; + + enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ + }; + + /* Generic case. */ + enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ + #define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ + }; + + /* 8169, 8168b and 810x except 8102e. */ + enum rtl_tx_desc_bit_0 { + /* First doubleword. */ + #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ + }; + + /* 8102e, 8168c and beyond. */ + enum rtl_tx_desc_bit_1 { + /* Second doubleword. 
*/ + #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IP_CS = (1 << 29), /* Calculate IP checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ + }; + + static const struct rtl_tx_desc_info { + struct { + u32 udp; + u32 tcp; + } checksum; + u16 mss_shift; + u16 opts_offset; + } tx_desc_info [] = { + [RTL_TD_0] = { + .checksum = { + .udp = TD0_IP_CS | TD0_UDP_CS, + .tcp = TD0_IP_CS | TD0_TCP_CS + }, + .mss_shift = TD0_MSS_SHIFT, + .opts_offset = 0 + }, + [RTL_TD_1] = { + .checksum = { + .udp = TD1_IP_CS | TD1_UDP_CS, + .tcp = TD1_IP_CS | TD1_TCP_CS + }, + .mss_shift = TD1_MSS_SHIFT, + .opts_offset = 1 + } + }; + + enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + + #define RxProtoUDP (PID1) + #define RxProtoTCP (PID0) + #define RxProtoIP (PID1 | PID0) + #define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ + }; + + #define RsvdMask 0x3fffc000 + + struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; + }; + + struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; + }; + + struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; + }; + + enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), + }; + + struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; + }; + + struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + spinlock_t lock; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. 
*/ + u32 dirty_rx; + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + u16 intr_event; + u16 napi_event; + u16 intr_mask; + + struct mdio_ops { + void (*write)(void __iomem *, int, int); + int (*read)(void __iomem *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + struct delayed_work task; + unsigned features; + + struct mii_if_info mii; + struct rtl8169_counters counters; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + + #define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; + #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + }; + + MODULE_AUTHOR("Realtek and the Linux r8169 crew netdev@vger.kernel.org"); + MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); + module_param(use_dac, int, 0); + MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); + module_param_named(debug, debug.msg_enable, int, 0); + MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(RTL8169_VERSION); + MODULE_FIRMWARE(FIRMWARE_8168D_1); + MODULE_FIRMWARE(FIRMWARE_8168D_2); + MODULE_FIRMWARE(FIRMWARE_8168E_1); + MODULE_FIRMWARE(FIRMWARE_8168E_2); + MODULE_FIRMWARE(FIRMWARE_8168E_3); + MODULE_FIRMWARE(FIRMWARE_8105E_1); + MODULE_FIRMWARE(FIRMWARE_8168F_1); + MODULE_FIRMWARE(FIRMWARE_8168F_2); + + static int rtl8169_open(struct net_device *dev); + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); + static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); + static int rtl8169_init_ring(struct net_device *dev); + static void rtl_hw_start(struct net_device *dev); + static int rtl8169_close(struct net_device *dev); + static void rtl_set_rx_mode(struct net_device *dev); + static void rtl8169_tx_timeout(struct net_device *dev); + static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); + static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, + void __iomem *, u32 budget); + static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); + static void rtl8169_down(struct net_device *dev); + static void rtl8169_rx_clear(struct rtl8169_private *tp); + static int rtl8169_poll(struct napi_struct *napi, int budget); + + static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) + { + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); + ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + } + } + + static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + return RTL_R32(OCPDR); + } + + static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) + break; + } + } + + static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W8(ERIDR, cmd); + RTL_W32(ERIAR, 0x800010e8); + msleep(2); + for (i = 0; i < 5; i++) { + udelay(100); + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + } + + ocp_write(tp, 0x1, 0x30, 0x00000001); + } + + #define OOB_CMD_RESET 0x00 + #define OOB_CMD_DRIVER_START 0x05 + #define OOB_CMD_DRIVER_STOP 0x06 + + static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) + { + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; + } + + static void rtl8168_driver_start(struct rtl8169_private *tp) + { + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if (ocp_read(tp, 0x0f, reg) & 0x00000800) + break; + } + } + + static void rtl8168_driver_stop(struct rtl8169_private *tp) + { + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0) + break; + } + } + + static int r8168dp_check_dash(struct rtl8169_private *tp) + { + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; + } + + static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + int i; + + RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed writing to the specified + * MII register. + */ + if (!(RTL_R32(PHYAR) & 0x80000000)) + break; + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); + } + + static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int i, value = -1; + + RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed retrieving data from + * the specified MII register. + */ + if (RTL_R32(PHYAR) & 0x80000000) { + value = RTL_R32(PHYAR) & 0xffff; + break; + } + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; + } + + static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) + { + int i; + + RTL_W32(OCPDR, data | + ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(OCPAR) & OCPAR_FLAG)) + break; + } + } + + static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | + (value & OCPDR_DATA_MASK)); + } + + static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int i; + + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + + return RTL_R32(OCPDR) & OCPDR_DATA_MASK; + } + + #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + + static void r8168dp_2_mdio_start(void __iomem *ioaddr) + { + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); + } + + static void r8168dp_2_mdio_stop(void __iomem *ioaddr) + { + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); + } + + static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(ioaddr, reg_addr, value); + + r8168dp_2_mdio_stop(ioaddr); + } + + static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(ioaddr, reg_addr); + + r8168dp_2_mdio_stop(ioaddr); + + return value; + } + + static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) + { + 
tp->mdio_ops.write(tp->mmio_addr, location, val); + } + + static int rtl_readphy(struct rtl8169_private *tp, int location) + { + return tp->mdio_ops.read(tp->mmio_addr, location); + } + + static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) + { + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); + } + + static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) + { + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val | p) & ~m); + } + + static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) + { + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); + } + + static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); + } + + static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) + { + unsigned int i; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) + break; + udelay(10); + } + } + + static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) + { + u16 value = 0xffff; + unsigned int i; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { + value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK; + break; + } + udelay(10); + } + + return value; + } + + static void rtl_csi_write(void __iomem *ioaddr, int addr, int value) + { + unsigned int i; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) + break; + udelay(10); + } + } + + static u32 rtl_csi_read(void __iomem *ioaddr, int addr) + { + u32 value = ~0x00; + unsigned int i; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(CSIAR) & CSIAR_FLAG) { + value = RTL_R32(CSIDR); + break; + } + udelay(10); + } + + return value; + } + + static + void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) + { + unsigned int i; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + udelay(100); + } + } + + static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) + { + u32 value = ~0x00; + unsigned int i; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + for (i = 0; i < 100; i++) { + if (RTL_R32(ERIAR) & ERIAR_FLAG) { + value = RTL_R32(ERIDR); + break; + } + udelay(100); + } + + return value; + } + + static void + rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) + { + u32 val; + + val = rtl_eri_read(ioaddr, addr, type); + rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); + } + + struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; + }; + + static void rtl_write_exgmac_batch(void __iomem *ioaddr, + const struct exgmac_reg *r, int len) + { + while (len-- > 0) { + rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } + } + + static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) + { + u8 value = 0xff; + unsigned int i; + + RTL_W32(EFUSEAR, (reg_addr & 
EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + for (i = 0; i < 300; i++) { + if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { + value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; + break; + } + udelay(100); + } + + return value; + } + + static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) + { + RTL_W16(IntrMask, 0x0000); + + RTL_W16(IntrStatus, 0xffff); + } + + static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; + } + + static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) + { + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; + } + + static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) + { + return RTL_R32(TBICSR) & TBILinkOk; + } + + static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) + { + return RTL_R8(PHYstatus) & LinkStatus; + } + + static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); + } + + static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) + { + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); + } + + static void rtl_link_chg_patch(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + } + } + + static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) + { + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. 
*/ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 100); + } + spin_unlock_irqrestore(&tp->lock, flags); + } + + static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) + { + __rtl8169_check_link_status(dev, tp, ioaddr, false); + } + + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + + static u32 __rtl8169_get_wol(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; + } + + static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + spin_unlock_irq(&tp->lock); + } + + static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) + { + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_ANY, Config1, PMEnable }, + { WAKE_PHY, Config3, LinkUp }, + { WAKE_MAGIC, Config3, MagicPacket }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake } + }; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + for (i = 0; i < ARRAY_SIZE(cfg); i++) { + u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + } + + static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + spin_unlock_irq(&tp->lock); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; + } + + static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) + { + return rtl_chip_infos[tp->mac_version].fw_name; + } + + static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8169_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? 
"N/A" : + rtl_fw->version); + } + + static int rtl8169_get_regs_len(struct net_device *dev) + { + return R8169_REGS_SIZE; + } + + static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; + } + + static int rtl8169_set_speed_xmii(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 adv) + { + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. */ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; + out: + return rc; + } + + static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) + { + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } + out: + return ret; + } + + static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int ret; + + del_timer_sync(&tp->timer); + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, 
cmd->advertising); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; + } + + static u32 rtl8169_fix_features(struct net_device *dev, u32 features) + { + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; + } + + static int rtl8169_set_features(struct net_device *dev, u32 features) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; + } + + static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, + struct sk_buff *skb) + { + return (vlan_tx_tag_present(skb)) ? + TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; + } + + static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) + { + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + + desc->opts2 = 0; + } + + static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; + } + + static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); + } + + static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&tp->lock, flags); + + rc = tp->get_settings(dev, cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + return rc; + } + + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + + if (regs->len > R8169_REGS_SIZE) + regs->len = R8169_REGS_SIZE; + + spin_lock_irqsave(&tp->lock, flags); + memcpy_fromio(p, tp->mmio_addr, regs->len); + spin_unlock_irqrestore(&tp->lock, flags); + } + + static u32 rtl8169_get_msglevel(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; + } + + static void rtl8169_set_msglevel(struct net_device *dev, u32 value) + { + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; + } + + static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", + }; + + static int rtl8169_get_sset_count(struct net_device *dev, int sset) + { + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } + } + + static void rtl8169_update_counters(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + struct rtl8169_counters *counters; + dma_addr_t paddr; + u32 cmd; + int wait = 1000; + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. 
+ */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return; + + counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); + if (!counters) + return; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | CounterDump); + + while (wait--) { + if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { + memcpy(&tp->counters, counters, sizeof(*counters)); + break; + } + udelay(10); + } + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + dma_free_coherent(d, sizeof(*counters), counters, paddr); + } + + static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) + { + struct rtl8169_private *tp = netdev_priv(dev); + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(tp->counters.tx_packets); + data[1] = le64_to_cpu(tp->counters.rx_packets); + data[2] = le64_to_cpu(tp->counters.tx_errors); + data[3] = le32_to_cpu(tp->counters.rx_errors); + data[4] = le16_to_cpu(tp->counters.rx_missed); + data[5] = le16_to_cpu(tp->counters.align_errors); + data[6] = le32_to_cpu(tp->counters.tx_one_collision); + data[7] = le32_to_cpu(tp->counters.tx_multi_collision); + data[8] = le64_to_cpu(tp->counters.rx_unicast); + data[9] = le64_to_cpu(tp->counters.rx_broadcast); + data[10] = le32_to_cpu(tp->counters.rx_multicast); + data[11] = le16_to_cpu(tp->counters.tx_aborted); + data[12] = le16_to_cpu(tp->counters.tx_underun); + } + + static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) + { + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } + } + + static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + }; + + static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) + { + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168F family. */ + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. 
*/ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. 
*/ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } + } + + static void rtl8169_print_mac_version(struct rtl8169_private *tp) + { + dprintk("mac_version = 0x%02x\n", tp->mac_version); + } + + struct phy_reg { + u16 reg; + u16 val; + }; + + static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) + { + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } + } + + #define PHY_READ 0x00000000 + #define PHY_DATA_OR 0x10000000 + #define PHY_DATA_AND 0x20000000 + #define PHY_BJMPN 0x30000000 + #define PHY_READ_EFUSE 0x40000000 + #define PHY_READ_MAC_BYTE 0x50000000 + #define PHY_WRITE_MAC_BYTE 0x60000000 + #define PHY_CLEAR_READCOUNT 0x70000000 + #define PHY_WRITE 0x80000000 + #define PHY_READCOUNT_EQ_SKIP 0x90000000 + #define PHY_COMP_EQ_SKIPN 0xa0000000 + #define PHY_COMP_NEQ_SKIPN 0xb0000000 + #define PHY_WRITE_PREVIOUS 0xc0000000 + #define PHY_SKIPN 0xd0000000 + #define PHY_DELAY_MS 0xe0000000 + #define PHY_WRITE_ERI_WORD 0xf0000000 + + struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; + } __packed; + + #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + + static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; + out: + return rc; + } + + static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) + { + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_READ_EFUSE: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + 
netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); + goto out; + } + } + rc = true; + out: + return rc; + } + + static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firmware\n"); + goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; + out: + return rc; + } + + static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + u32 predata, count; + size_t index; + + predata = count = 0; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_READ_EFUSE: + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + BUG(); + } + } + } + + static void rtl_release_firmware(struct rtl8169_private *tp) + { + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + } + + static void rtl_apply_firmware(struct rtl8169_private *tp) + { + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); + } + + static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) + { + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); + } + + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) + { + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); + } + + static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 
+ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
+ {
+ 	struct pci_dev *pdev = tp->pci_dev;
+
+ 	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
+ 	    (pdev->subsystem_device != 0xe000))
+ 		return;
+
+ 	rtl_writephy(tp, 0x1f, 0x0001);
+ 	rtl_writephy(tp, 0x10, 0xf01b);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x04, 0x0000 },
+ 		{ 0x03, 0x00a1 },
+ 		{ 0x02, 0x0008 },
+ 		{ 0x01, 0x0120 },
+ 		{ 0x00, 0x1000 },
+ 		{ 0x04, 0x0800 },
+ 		{ 0x04, 0x9000 },
+ 		{ 0x03, 0x802f },
+ 		{ 0x02, 0x4f02 },
+ 		{ 0x01, 0x0409 },
+ 		{ 0x00, 0xf099 },
+ 		{ 0x04, 0x9800 },
+ 		{ 0x04, 0xa000 },
+ 		{ 0x03, 0xdf01 },
+ 		{ 0x02, 0xdf20 },
+ 		{ 0x01, 0xff95 },
+ 		{ 0x00, 0xba00 },
+ 		{ 0x04, 0xa800 },
+ 		{ 0x04, 0xf000 },
+ 		{ 0x03, 0xdf01 },
+ 		{ 0x02, 0xdf20 },
+ 		{ 0x01, 0x101a },
+ 		{ 0x00, 0xa0ff },
+ 		{ 0x04, 0xf800 },
+ 		{ 0x04, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x10, 0xf41b },
+ 		{ 0x14, 0xfb54 },
+ 		{ 0x18, 0xf5c7 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	rtl8169scd_hw_phy_config_quirk(tp);
+ }
+
+ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x04, 0x0000 },
+ 		{ 0x03, 0x00a1 },
+ 		{ 0x02, 0x0008 },
+ 		{ 0x01, 0x0120 },
+ 		{ 0x00, 0x1000 },
+ 		{ 0x04, 0x0800 },
+ 		{ 0x04, 0x9000 },
+ 		{ 0x03, 0x802f },
+ 		{ 0x02, 0x4f02 },
+ 		{ 0x01, 0x0409 },
+ 		{ 0x00, 0xf099 },
+ 		{ 0x04, 0x9800 },
+ 		{ 0x04, 0xa000 },
+ 		{ 0x03, 0xdf01 },
+ 		{ 0x02, 0xdf20 },
+ 		{ 0x01, 0xff95 },
+ 		{ 0x00, 0xba00 },
+ 		{ 0x04, 0xa800 },
+ 		{ 0x04, 0xf000 },
+ 		{ 0x03, 0xdf01 },
+ 		{ 0x02, 0xdf20 },
+ 		{ 0x01, 0x101a },
+ 		{ 0x00, 0xa0ff },
+ 		{ 0x04, 0xf800 },
+ 		{ 0x04, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x0b, 0x8480 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x18, 0x67c7 },
+ 		{ 0x04, 0x2000 },
+ 		{ 0x03, 0x002f },
+ 		{ 0x02, 0x4360 },
+ 		{ 0x01, 0x0109 },
+ 		{ 0x00, 0x3022 },
+ 		{ 0x04, 0x2800 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x10, 0xf41b },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy(tp, 0x1f, 0x0001);
+ 	rtl_patchphy(tp, 0x16, 1 << 0);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x10, 0xf41b },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x1d, 0x0f00 },
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x0c, 0x1ec8 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x1d, 0x3d98 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_patchphy(tp, 0x14, 1 << 5);
+ 	rtl_patchphy(tp, 0x0d, 1 << 5);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x12, 0x2300 },
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x00, 0x88d4 },
+ 		{ 0x01, 0x82b1 },
+ 		{ 0x03, 0x7002 },
+ 		{ 0x08, 0x9e30 },
+ 		{ 0x09, 0x01f0 },
+ 		{ 0x0a, 0x5500 },
+ 		{ 0x0c, 0x00c8 },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x12, 0xc096 },
+ 		{ 0x16, 0x000a },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x09, 0x2000 },
+ 		{ 0x09, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	rtl_patchphy(tp, 0x14, 1 << 5);
+ 	rtl_patchphy(tp, 0x0d, 1 << 5);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x12, 0x2300 },
+ 		{ 0x03, 0x802f },
+ 		{ 0x02, 0x4f02 },
+ 		{ 0x01, 0x0409 },
+ 		{ 0x00, 0xf099 },
+ 		{ 0x04, 0x9800 },
+ 		{ 0x04, 0x9000 },
+ 		{ 0x1d, 0x3d98 },
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x0c, 0x7eb8 },
+ 		{ 0x06, 0x0761 },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x16, 0x0f0a },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	rtl_patchphy(tp, 0x16, 1 << 0);
+ 	rtl_patchphy(tp, 0x14, 1 << 5);
+ 	rtl_patchphy(tp, 0x0d, 1 << 5);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x12, 0x2300 },
+ 		{ 0x1d, 0x3d98 },
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x0c, 0x7eb8 },
+ 		{ 0x06, 0x5461 },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x16, 0x0f0a },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	rtl_patchphy(tp, 0x16, 1 << 0);
+ 	rtl_patchphy(tp, 0x14, 1 << 5);
+ 	rtl_patchphy(tp, 0x0d, 1 << 5);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	rtl8168c_3_hw_phy_config(tp);
+ }
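+
+ /*
+  * The 8168d setups below read eFuse byte 0x01 to tell PHY revisions
+  * apart: 0xb1 selects one set of regulator parameters (0x669a), any
+  * other value selects the alternative (0x6662 here, 0x2642 in the
+  * _2 variant).
+  */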
+ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init_0[] = {
+ 		/* Channel Estimation */
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x06, 0x4064 },
+ 		{ 0x07, 0x2863 },
+ 		{ 0x08, 0x059c },
+ 		{ 0x09, 0x26b4 },
+ 		{ 0x0a, 0x6a19 },
+ 		{ 0x0b, 0xdcc8 },
+ 		{ 0x10, 0xf06d },
+ 		{ 0x14, 0x7f68 },
+ 		{ 0x18, 0x7fd9 },
+ 		{ 0x1c, 0xf0ff },
+ 		{ 0x1d, 0x3d9c },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x12, 0xf49f },
+ 		{ 0x13, 0x070b },
+ 		{ 0x1a, 0x05ad },
+ 		{ 0x14, 0x94c0 },
+
+ 		/*
+ 		 * Tx Error Issue
+ 		 * Enhance line driver power
+ 		 */
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x06, 0x5561 },
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8332 },
+ 		{ 0x06, 0x5561 },
+
+ 		/*
+ 		 * Can not link to 1Gbps with bad cable
+ 		 * Decrease SNR threshold from 21.07dB to 19.04dB
+ 		 */
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x0d, 0xf880 }
+ 	};
+ 	void __iomem *ioaddr = tp->mmio_addr;
+
+ 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ 	/*
+ 	 * Rx Error Issue
+ 	 * Fine Tune Switching regulator parameter
+ 	 */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
+ 	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
+
+ 	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ 		static const struct phy_reg phy_reg_init[] = {
+ 			{ 0x1f, 0x0002 },
+ 			{ 0x05, 0x669a },
+ 			{ 0x1f, 0x0005 },
+ 			{ 0x05, 0x8330 },
+ 			{ 0x06, 0x669a },
+ 			{ 0x1f, 0x0002 }
+ 		};
+ 		int val;
+
+ 		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 		val = rtl_readphy(tp, 0x0d);
+
+ 		if ((val & 0x00ff) != 0x006c) {
+ 			static const u32 set[] = {
+ 				0x0065, 0x0066, 0x0067, 0x0068,
+ 				0x0069, 0x006a, 0x006b, 0x006c
+ 			};
+ 			int i;
+
+ 			rtl_writephy(tp, 0x1f, 0x0002);
+
+ 			val &= 0xff00;
+ 			for (i = 0; i < ARRAY_SIZE(set); i++)
+ 				rtl_writephy(tp, 0x0d, val | set[i]);
+ 		}
+ 	} else {
+ 		static const struct phy_reg phy_reg_init[] = {
+ 			{ 0x1f, 0x0002 },
+ 			{ 0x05, 0x6662 },
+ 			{ 0x1f, 0x0005 },
+ 			{ 0x05, 0x8330 },
+ 			{ 0x06, 0x6662 }
+ 		};
+
+ 		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ 	}
+
+ 	/* RSET couple improve */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_patchphy(tp, 0x0d, 0x0300);
+ 	rtl_patchphy(tp, 0x0f, 0x0010);
+
+ 	/* Fine tune PLL performance */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ 	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x001b);
+
+ 	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
+
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
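+
+ /*
+  * Same channel-estimation table as rtl8168d_1 above, but this variant
+  * skips the Rx-error regulator tweak and the RSET fixup, programs the
+  * regulator slew rate instead, and expects firmware tag 0xb300 rather
+  * than 0xbf00.
+  */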
+ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init_0[] = {
+ 		/* Channel Estimation */
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x06, 0x4064 },
+ 		{ 0x07, 0x2863 },
+ 		{ 0x08, 0x059c },
+ 		{ 0x09, 0x26b4 },
+ 		{ 0x0a, 0x6a19 },
+ 		{ 0x0b, 0xdcc8 },
+ 		{ 0x10, 0xf06d },
+ 		{ 0x14, 0x7f68 },
+ 		{ 0x18, 0x7fd9 },
+ 		{ 0x1c, 0xf0ff },
+ 		{ 0x1d, 0x3d9c },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x12, 0xf49f },
+ 		{ 0x13, 0x070b },
+ 		{ 0x1a, 0x05ad },
+ 		{ 0x14, 0x94c0 },
+
+ 		/*
+ 		 * Tx Error Issue
+ 		 * Enhance line driver power
+ 		 */
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x06, 0x5561 },
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8332 },
+ 		{ 0x06, 0x5561 },
+
+ 		/*
+ 		 * Can not link to 1Gbps with bad cable
+ 		 * Decrease SNR threshold from 21.07dB to 19.04dB
+ 		 */
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x0d, 0xf880 }
+ 	};
+ 	void __iomem *ioaddr = tp->mmio_addr;
+
+ 	rtl_writephy_batch(tp, phy_reg_init_0,
+ 			   ARRAY_SIZE(phy_reg_init_0));
+
+ 	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ 		static const struct phy_reg phy_reg_init[] = {
+ 			{ 0x1f, 0x0002 },
+ 			{ 0x05, 0x669a },
+ 			{ 0x1f, 0x0005 },
+ 			{ 0x05, 0x8330 },
+ 			{ 0x06, 0x669a },
+
+ 			{ 0x1f, 0x0002 }
+ 		};
+ 		int val;
+
+ 		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 		val = rtl_readphy(tp, 0x0d);
+ 		if ((val & 0x00ff) != 0x006c) {
+ 			static const u32 set[] = {
+ 				0x0065, 0x0066, 0x0067, 0x0068,
+ 				0x0069, 0x006a, 0x006b, 0x006c
+ 			};
+ 			int i;
+
+ 			rtl_writephy(tp, 0x1f, 0x0002);
+
+ 			val &= 0xff00;
+ 			for (i = 0; i < ARRAY_SIZE(set); i++)
+ 				rtl_writephy(tp, 0x0d, val | set[i]);
+ 		}
+ 	} else {
+ 		static const struct phy_reg phy_reg_init[] = {
+ 			{ 0x1f, 0x0002 },
+ 			{ 0x05, 0x2642 },
+ 			{ 0x1f, 0x0005 },
+ 			{ 0x05, 0x8330 },
+ 			{ 0x06, 0x2642 }
+ 		};
+
+ 		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ 	}
+
+ 	/* Fine tune PLL performance */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ 	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ 	/* Switching regulator Slew rate */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_patchphy(tp, 0x0f, 0x0017);
+
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x001b);
+
+ 	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
+
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x10, 0x0008 },
+ 		{ 0x0d, 0x006c },
+
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x0d, 0xf880 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x0b, 0xa4d8 },
+ 		{ 0x09, 0x281c },
+ 		{ 0x07, 0x2883 },
+ 		{ 0x0a, 0x6b35 },
+ 		{ 0x1d, 0x3da4 },
+ 		{ 0x1c, 0xeffd },
+ 		{ 0x14, 0x7f52 },
+ 		{ 0x18, 0x7fc6 },
+ 		{ 0x08, 0x0601 },
+ 		{ 0x06, 0x4063 },
+ 		{ 0x10, 0xf074 },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x13, 0x0789 },
+ 		{ 0x12, 0xf4bd },
+ 		{ 0x1a, 0x04fd },
+ 		{ 0x14, 0x84b0 },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x00, 0x9200 },
+
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x01, 0x0340 },
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x04, 0x4000 },
+ 		{ 0x03, 0x1d21 },
+ 		{ 0x02, 0x0c32 },
+ 		{ 0x01, 0x0200 },
+ 		{ 0x00, 0x5554 },
+ 		{ 0x04, 0x4800 },
+ 		{ 0x04, 0x4000 },
+ 		{ 0x04, 0xf000 },
+ 		{ 0x03, 0xdf01 },
+ 		{ 0x02, 0xdf20 },
+ 		{ 0x01, 0x101a },
+ 		{ 0x00, 0xa0ff },
+ 		{ 0x04, 0xf800 },
+ 		{ 0x04, 0xf000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x0023 },
+ 		{ 0x16, 0x0000 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x17, 0x0cc0 },
+
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x002d },
+ 		{ 0x18, 0x0040 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ 	rtl_patchphy(tp, 0x0d, 1 << 5);
+ }
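+
+ /*
+  * From the 8168e family onwards the PHY ram code (firmware) is loaded
+  * first, via rtl_apply_firmware(), and the register tweaks below are
+  * applied on top of it.
+  */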
+ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		/* Enable Delay cap */
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8b80 },
+ 		{ 0x06, 0xc896 },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Channel estimation fine tune */
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x0b, 0x6c20 },
+ 		{ 0x07, 0x2872 },
+ 		{ 0x1c, 0xefff },
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x14, 0x6420 },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Update PFM & 10M TX idle timer */
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x002f },
+ 		{ 0x15, 0x1919 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x00ac },
+ 		{ 0x18, 0x0006 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_apply_firmware(tp);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	/* DCO enable for 10M IDLE Power */
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x0023);
+ 	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* For impedance matching */
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* PHY auto speed down */
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x002d);
+ 	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b86);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b85);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x0020);
+ 	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
+ 	rtl_writephy(tp, 0x1f, 0x0006);
+ 	rtl_writephy(tp, 0x00, 0x5a00);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_writephy(tp, 0x0d, 0x0007);
+ 	rtl_writephy(tp, 0x0e, 0x003c);
+ 	rtl_writephy(tp, 0x0d, 0x4007);
+ 	rtl_writephy(tp, 0x0e, 0x0000);
+ 	rtl_writephy(tp, 0x0d, 0x0000);
+ }
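+
+ /*
+  * Unlike the 8168e_1 setup above, this variant also programs the EEE
+  * machinery through the ERI/EXGMAC registers (rtl_w1w0_eri) and ends
+  * with a "green feature" toggle.
+  */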
+ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		/* Enable Delay cap */
+ 		{ 0x1f, 0x0004 },
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x00ac },
+ 		{ 0x18, 0x0006 },
+ 		{ 0x1f, 0x0002 },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Channel estimation fine tune */
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x09, 0xa20f },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Green Setting */
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8b5b },
+ 		{ 0x06, 0x9222 },
+ 		{ 0x05, 0x8b6d },
+ 		{ 0x06, 0x8000 },
+ 		{ 0x05, 0x8b76 },
+ 		{ 0x06, 0x8000 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_apply_firmware(tp);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	/* For 4-corner performance improve */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b80);
+ 	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* PHY auto speed down */
+ 	rtl_writephy(tp, 0x1f, 0x0004);
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x002d);
+ 	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ 	/* Improve 10M EEE waveform */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b86);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* Improve 2-pair detection performance */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b85);
+ 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* EEE setting */
+ 	rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
+ 		     ERIAR_EXGMAC);
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b85);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ 	rtl_writephy(tp, 0x1f, 0x0004);
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x0020);
- 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
++	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
+ 	rtl_writephy(tp, 0x1f, 0x0002);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_writephy(tp, 0x0d, 0x0007);
+ 	rtl_writephy(tp, 0x0e, 0x003c);
+ 	rtl_writephy(tp, 0x0d, 0x4007);
+ 	rtl_writephy(tp, 0x0e, 0x0000);
+ 	rtl_writephy(tp, 0x0d, 0x0000);
+
+ 	/* Green feature */
+ 	rtl_writephy(tp, 0x1f, 0x0003);
+ 	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+ 	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		/* Channel estimation fine tune */
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x09, 0xa20f },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Modify green table for giga & fnet */
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8b55 },
+ 		{ 0x06, 0x0000 },
+ 		{ 0x05, 0x8b5e },
+ 		{ 0x06, 0x0000 },
+ 		{ 0x05, 0x8b67 },
+ 		{ 0x06, 0x0000 },
+ 		{ 0x05, 0x8b70 },
+ 		{ 0x06, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+ 		{ 0x1f, 0x0007 },
+ 		{ 0x1e, 0x0078 },
+ 		{ 0x17, 0x0000 },
+ 		{ 0x19, 0x00fb },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Modify green table for 10M */
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x05, 0x8b79 },
+ 		{ 0x06, 0xaa00 },
+ 		{ 0x1f, 0x0000 },
+
+ 		/* Disable high-impedance detection (RTCT) */
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x01, 0x328a },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_apply_firmware(tp);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ 	/* For 4-corner performance improve */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b80);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* PHY auto speed down */
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x002d);
+ 	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ 	/* Improve 10M EEE waveform */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b86);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* Improve 2-pair detection performance */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b85);
+ 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	rtl_apply_firmware(tp);
+
+ 	/* For 4-corner performance improve */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b80);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+
+ 	/* PHY auto speed down */
+ 	rtl_writephy(tp, 0x1f, 0x0007);
+ 	rtl_writephy(tp, 0x1e, 0x002d);
+ 	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ 	/* Improve 10M EEE waveform */
+ 	rtl_writephy(tp, 0x1f, 0x0005);
+ 	rtl_writephy(tp, 0x05, 0x8b86);
+ 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0003 },
+ 		{ 0x08, 0x441d },
+ 		{ 0x01, 0x9100 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_patchphy(tp, 0x11, 1 << 12);
+ 	rtl_patchphy(tp, 0x19, 1 << 13);
+ 	rtl_patchphy(tp, 0x10, 1 << 15);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+ {
+ 	static const struct phy_reg phy_reg_init[] = {
+ 		{ 0x1f, 0x0005 },
+ 		{ 0x1a, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0004 },
+ 		{ 0x1c, 0x0000 },
+ 		{ 0x1f, 0x0000 },
+
+ 		{ 0x1f, 0x0001 },
+ 		{ 0x15, 0x7701 },
+ 		{ 0x1f, 0x0000 }
+ 	};
+
+ 	/* Disable ALDPS before ram code */
+ 	rtl_writephy(tp, 0x1f, 0x0000);
+ 	rtl_writephy(tp, 0x18, 0x0310);
+ 	msleep(100);
+
+ 	rtl_apply_firmware(tp);
+
+ 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
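+
+ /*
+  * Per-chip dispatch: each supported mac_version gets its own PHY setup;
+  * RTL_GIGA_MAC_VER_01 and _31 deliberately get none.
+  */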
+ static void rtl_hw_phy_config(struct net_device *dev)
+ {
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+
+ 	rtl8169_print_mac_version(tp);
+
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_01:
+ 		break;
+ 	case RTL_GIGA_MAC_VER_02:
+ 	case RTL_GIGA_MAC_VER_03:
+ 		rtl8169s_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_04:
+ 		rtl8169sb_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_05:
+ 		rtl8169scd_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_06:
+ 		rtl8169sce_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_07:
+ 	case RTL_GIGA_MAC_VER_08:
+ 	case RTL_GIGA_MAC_VER_09:
+ 		rtl8102e_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_11:
+ 		rtl8168bb_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_12:
+ 		rtl8168bef_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_17:
+ 		rtl8168bef_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_18:
+ 		rtl8168cp_1_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_19:
+ 		rtl8168c_1_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_20:
+ 		rtl8168c_2_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_21:
+ 		rtl8168c_3_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_22:
+ 		rtl8168c_4_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_23:
+ 	case RTL_GIGA_MAC_VER_24:
+ 		rtl8168cp_2_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_25:
+ 		rtl8168d_1_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_26:
+ 		rtl8168d_2_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_27:
+ 		rtl8168d_3_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_28:
+ 		rtl8168d_4_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_29:
+ 	case RTL_GIGA_MAC_VER_30:
+ 		rtl8105e_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_31:
+ 		/* None. */
+ 		break;
+ 	case RTL_GIGA_MAC_VER_32:
+ 	case RTL_GIGA_MAC_VER_33:
+ 		rtl8168e_1_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_34:
+ 		rtl8168e_2_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_35:
+ 		rtl8168f_1_hw_phy_config(tp);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_36:
+ 		rtl8168f_2_hw_phy_config(tp);
+ 		break;
+
+ 	default:
+ 		break;
+ 	}
+ }
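+
+ /*
+  * Periodic callback: while a PHY reset is still pending it merely
+  * re-arms itself quickly (HZ/10); once link_ok() reports link up it
+  * stops; otherwise it triggers another PHY reset and re-arms.
+  */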
+ static void rtl8169_phy_timer(unsigned long __opaque)
+ {
+ 	struct net_device *dev = (struct net_device *)__opaque;
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+ 	struct timer_list *timer = &tp->timer;
+ 	void __iomem *ioaddr = tp->mmio_addr;
+ 	unsigned long timeout = RTL8169_PHY_TIMEOUT;
+
+ 	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
+
+ 	spin_lock_irq(&tp->lock);
+
+ 	if (tp->phy_reset_pending(tp)) {
+ 		/*
+ 		 * A busy loop could burn quite a few cycles on a modern CPU.
+ 		 * Let's delay the execution of the timer for a few ticks.
+ 		 */
+ 		timeout = HZ/10;
+ 		goto out_mod_timer;
+ 	}
+
+ 	if (tp->link_ok(ioaddr))
+ 		goto out_unlock;
+
+ 	netif_warn(tp, link, dev, "PHY reset until link up\n");
+
+ 	tp->phy_reset_enable(tp);
+
+ out_mod_timer:
+ 	mod_timer(timer, jiffies + timeout);
+ out_unlock:
+ 	spin_unlock_irq(&tp->lock);
+ }
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /*
+  * Polling 'interrupt' - used by things like netconsole to send skbs
+  * without having to re-enable interrupts. It's not called while
+  * the interrupt routine is executing.
+  */
+ static void rtl8169_netpoll(struct net_device *dev)
+ {
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+ 	struct pci_dev *pdev = tp->pci_dev;
+
+ 	disable_irq(pdev->irq);
+ 	rtl8169_interrupt(pdev->irq, dev);
+ 	enable_irq(pdev->irq);
+ }
+ #endif
+
+ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
+ 				  void __iomem *ioaddr)
+ {
+ 	iounmap(ioaddr);
+ 	pci_release_regions(pdev);
+ 	pci_clear_mwi(pdev);
+ 	pci_disable_device(pdev);
+ 	free_netdev(dev);
+ }
+
+ static void rtl8169_phy_reset(struct net_device *dev,
+ 			      struct rtl8169_private *tp)
+ {
+ 	unsigned int i;
+
+ 	tp->phy_reset_enable(tp);
+ 	for (i = 0; i < 100; i++) {
+ 		if (!tp->phy_reset_pending(tp))
+ 			return;
+ 		msleep(1);
+ 	}
+ 	netif_err(tp, link, dev, "PHY reset failed\n");
+ }
+
+ static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+ {
+ 	void __iomem *ioaddr = tp->mmio_addr;
+
+ 	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+ 	       (RTL_R8(PHYstatus) & TBI_Enable);
+ }
+
+ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+ {
+ 	void __iomem *ioaddr = tp->mmio_addr;
+
+ 	rtl_hw_phy_config(dev);
+
+ 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ 		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ 		RTL_W8(0x82, 0x01);
+ 	}
+
+ 	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
+
+ 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ 		pci_write_config_byte(tp-&g