The following commit has been merged in the master branch:

commit 49b21301f6393c64784d45b8eb9f17985887a9e9
Merge: 6ea2c10707dc50aa7b038f3ae9eb963f79664c9e 8b1857357acd919b9a7fa391afbea30123fdfaec
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Oct 25 17:17:57 2011 +1100
Merge remote-tracking branch 'net-next/master'
Conflicts:
	arch/powerpc/configs/40x/hcu4_defconfig
	drivers/s390/cio/qdio_main.c
diff --combined Documentation/feature-removal-schedule.txt
index ead08f1,d5ac362..7c799fc
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@@ -495,6 -495,29 +495,6 @@@ Who: Jean Delvare <khali@linux-fr.org
----------------------------
-What: Support for UVCIOC_CTRL_ADD in the uvcvideo driver
-When: 3.2
-Why: The information passed to the driver by this ioctl is now queried
-     dynamically from the device.
-Who: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
-----------------------------
-
-What: Support for UVCIOC_CTRL_MAP_OLD in the uvcvideo driver
-When: 3.2
-Why: Used only by applications compiled against older driver versions.
-     Superseded by UVCIOC_CTRL_MAP which supports V4L2 menu controls.
-Who: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
-----------------------------
-
-What: Support for UVCIOC_CTRL_GET and UVCIOC_CTRL_SET in the uvcvideo driver
-When: 3.2
-Why: Superseded by the UVCIOC_CTRL_QUERY ioctl.
-Who: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-
-----------------------------
-
 What: Support for driver specific ioctls in the pwc driver
      (everything defined in media/pwc-ioctl.h)
 When: 3.3
@@@ -571,9 -594,18 +571,18 @@@ Why: In 3.0, we can now autodetect i
 Who: Lee, Chun-Yi <jlee@novell.com>
----------------------------
+ What: The XFS nodelaylog mount option
When: 3.3
Why: The delaylog mode that has been the default since 2.6.39 has proven
     stable, and the old code is in the way of additional improvements in
     the log code.
Who: Christoph Hellwig <hch@lst.de>
+ 
+ ----------------------------
+ 
+ What: iwlagn alias support
+ When: 3.5
+ Why: The iwlagn module has been renamed iwlwifi. The alias will be around
+ for backward compatibility for several cycles and then dropped.
+ Who: Don Fry <donald.h.fry@intel.com>
diff --combined MAINTAINERS
index 0d7036c,5008b08..3cceec1
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -117,20 -117,20 +117,20 @@@ Maintainers List (try to look for most
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
- F: drivers/net/3c505*
+ F: drivers/net/ethernet/i825xx/3c505*
3C59X NETWORK DRIVER M: Steffen Klassert klassert@mathematik.tu-chemnitz.de L: netdev@vger.kernel.org S: Maintained F: Documentation/networking/vortex.txt - F: drivers/net/3c59x.c + F: drivers/net/ethernet/3com/3c59x.c
3CR990 NETWORK DRIVER M: David Dillow dave@thedillows.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/typhoon* + F: drivers/net/ethernet/3com/typhoon*
3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS) M: Adam Radford linuxraid@lsi.com @@@ -156,7 -156,7 +156,7 @@@ M: Realtek linux nic maintainers <nic_s M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/r8169.c + F: drivers/net/ethernet/realtek/r8169.c
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER M: Greg Kroah-Hartman gregkh@suse.de @@@ -170,8 -170,7 +170,7 @@@ F: include/linux/serial_8250. 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.] L: netdev@vger.kernel.org S: Orphan / Obsolete - F: drivers/net/*8390* - F: drivers/net/ax88796.c + F: drivers/net/ethernet/8390/
9P FILE SYSTEM M: Eric Van Hensbergen ericvh@gmail.com @@@ -214,7 -213,7 +213,7 @@@ ACENIC DRIVE M: Jes Sorensen jes@trained-monkey.org L: linux-acenic@sunsite.dk S: Maintained - F: drivers/net/acenic* + F: drivers/net/ethernet/alteon/acenic*
ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER M: Peter Feuerer peter@piie.net @@@ -688,12 -687,6 +687,12 @@@ F: drivers/mtd/nand/bcm_umi_nand. F: drivers/mtd/nand/bcm_umi_bch.c F: drivers/mtd/nand/nand_bcm_umi.h
+ARM/CALXEDA HIGHBANK ARCHITECTURE +M: Rob Herring rob.herring@calxeda.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-highbank/ + ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT M: Anton Vorontsov avorontsov@mvista.com S: Maintained @@@ -752,7 -745,7 +751,7 @@@ L: linux-arm-kernel@lists.infradead.or W: http://www.arm.linux.org.uk/ S: Maintained F: arch/arm/mach-ebsa110/ - F: drivers/net/arm/am79c961a.* + F: drivers/net/ethernet/amd/am79c961a.*
ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6) M: Daniel Ribeiro drwyrm@gmail.com @@@ -793,13 -786,6 +792,13 @@@ L: linux-arm-kernel@lists.infradead.or S: Maintained F: arch/arm/mach-mx5/
+ARM/FREESCALE IMX6 +M: Shawn Guo shawn.guo@linaro.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +T: git git://git.linaro.org/people/shawnguo/linux-2.6.git +F: arch/arm/mach-imx/*imx6* + ARM/GLOMATION GESBC9312SX MACHINE SUPPORT M: Lennert Buytenhek kernel@wantstofly.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -1028,7 -1014,8 +1027,8 @@@ F: arch/arm/include/asm/hardware/ioc. F: arch/arm/include/asm/hardware/iomd.h F: arch/arm/include/asm/hardware/memc.h F: arch/arm/mach-rpc/ - F: drivers/net/arm/ether* + F: drivers/net/ethernet/i825xx/ether1* + F: drivers/net/ethernet/seeq/ether3* F: drivers/scsi/arm/
ARM/SHARK MACHINE SUPPORT @@@ -1097,24 -1084,6 +1097,24 @@@ F: arch/arm/plat-s5p/dev-fimc F: arch/arm/plat-samsung/include/plat/*fimc* F: drivers/media/video/s5p-fimc/
+ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +M: Kamil Debski k.debski@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: arch/arm/plat-s5p/dev-mfc.c +F: drivers/media/video/s5p-mfc/ + +ARM/SAMSUNG S5P SERIES TV SUBSYSTEM SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +M: Tomasz Stanislawski t.stanislaws@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: arch/arm/plat-s5p/dev-tv.c +F: drivers/media/video/s5p-tv/ + ARM/SHMOBILE ARM ARCHITECTURE M: Paul Mundt lethal@linux-sh.org M: Magnus Damm magnus.damm@gmail.com @@@ -1158,7 -1127,7 +1158,7 @@@ F: arch/arm/mach-nuc93x F: drivers/input/keyboard/w90p910_keypad.c F: drivers/input/touchscreen/w90p910_ts.c F: drivers/watchdog/nuc900_wdt.c - F: drivers/net/arm/w90p910_ether.c + F: drivers/net/ethernet/nuvoton/w90p910_ether.c F: drivers/mtd/nand/nuc900_nand.c F: drivers/rtc/rtc-nuc900.c F: drivers/spi/spi_nuc900.c @@@ -1261,7 -1230,7 +1261,7 @@@ F: Documentation/aoe F: drivers/block/aoe/
ATHEROS ATH GENERIC UTILITIES - M: "Luis R. Rodriguez" lrodriguez@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com L: linux-wireless@vger.kernel.org S: Supported F: drivers/net/wireless/ath/* @@@ -1269,7 -1238,7 +1269,7 @@@ ATHEROS ATH5K WIRELESS DRIVER M: Jiri Slaby jirislaby@gmail.com M: Nick Kossifidis mickflemm@gmail.com - M: "Luis R. Rodriguez" lrodriguez@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com M: Bob Copeland me@bobcopeland.com L: linux-wireless@vger.kernel.org L: ath5k-devel@lists.ath5k.org @@@ -1277,11 -1246,19 +1277,19 @@@ W: http://wireless.kernel.org/en/users/ S: Maintained F: drivers/net/wireless/ath/ath5k/
+ ATHEROS ATH6KL WIRELESS DRIVER + M: Kalle Valo kvalo@qca.qualcomm.com + L: linux-wireless@vger.kernel.org + W: http://wireless.kernel.org/en/users/Drivers/ath6kl + T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git + S: Supported + F: drivers/net/wireless/ath/ath6kl/ + ATHEROS ATH9K WIRELESS DRIVER - M: "Luis R. Rodriguez" lrodriguez@atheros.com - M: Jouni Malinen jmalinen@atheros.com - M: Vasanthakumar Thiagarajan vasanth@atheros.com - M: Senthil Balasubramanian senthilkumar@atheros.com + M: "Luis R. Rodriguez" mcgrof@qca.qualcomm.com + M: Jouni Malinen jouni@qca.qualcomm.com + M: Vasanthakumar Thiagarajan vthiagar@qca.qualcomm.com + M: Senthil Balasubramanian senthilb@qca.qualcomm.com L: linux-wireless@vger.kernel.org L: ath9k-devel@lists.ath9k.org W: http://wireless.kernel.org/en/users/Drivers/ath9k @@@ -1313,7 -1290,7 +1321,7 @@@ L: netdev@vger.kernel.or W: http://sourceforge.net/projects/atl1 W: http://atl1.sourceforge.net S: Maintained - F: drivers/net/atlx/ + F: drivers/net/ethernet/atheros/
ATM M: Chas Williams chas@cmf.nrl.navy.mil @@@ -1353,7 -1330,7 +1361,7 @@@ F: include/video/atmel_lcdc. ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com S: Supported - F: drivers/net/macb.* + F: drivers/net/ethernet/cadence/
ATMEL SPI DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com @@@ -1476,7 -1453,7 +1484,7 @@@ BLACKFIN EMAC DRIVE L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported - F: drivers/net/bfin_mac.* + F: drivers/net/ethernet/adi/
BLACKFIN RTC DRIVER M: Mike Frysinger vapier.adi@gmail.com @@@ -1557,27 -1534,27 +1565,27 @@@ BROADCOM B44 10/100 ETHERNET DRIVE M: Gary Zambrano zambrano@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/b44.* + F: drivers/net/ethernet/broadcom/b44.*
BROADCOM BNX2 GIGABIT ETHERNET DRIVER M: Michael Chan mchan@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bnx2.* - F: drivers/net/bnx2_* + F: drivers/net/ethernet/broadcom/bnx2.* + F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER M: Eilon Greenstein eilong@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bnx2x/ + F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Matt Carlson mcarlson@broadcom.com M: Michael Chan mchan@broadcom.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/tg3.* + F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER M: Brett Rudley brudley@broadcom.com @@@ -1606,7 -1583,7 +1614,7 @@@ BROCADE BNA 10 GIGABIT ETHERNET DRIVE M: Rasesh Mody rmody@brocade.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/bna/ + F: drivers/net/ethernet/brocade/bna/
BSG (block layer generic sg v4 driver) M: FUJITA Tomonori fujita.tomonori@lab.ntt.co.jp @@@ -1655,14 -1632,6 +1663,14 @@@ T: git git://git.alsa-project.org/alsa- S: Maintained F: sound/pci/oxygen/
+C6X ARCHITECTURE +M: Mark Salter msalter@redhat.com +M: Aurelien Jacquiot a-jacquiot@ti.com +L: linux-c6x-dev@linux-c6x.org +W: http://www.linux-c6x.org/wiki/index.php/Main_Page +S: Maintained +F: arch/c6x/ + CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS M: David Howells dhowells@redhat.com L: linux-cachefs@redhat.com @@@ -1702,7 -1671,7 +1710,7 @@@ CAN NETWORK LAYE M: Oliver Hartkopp socketcan@hartkopp.net M: Oliver Hartkopp oliver.hartkopp@volkswagen.de M: Urs Thuermann urs.thuermann@volkswagen.de - L: socketcan-core@lists.berlios.de (subscribers-only) + L: linux-can@vger.kernel.org L: netdev@vger.kernel.org W: http://developer.berlios.de/projects/socketcan/ S: Maintained @@@ -1714,7 -1683,7 +1722,7 @@@ F: include/linux/can/raw.
CAN NETWORK DRIVERS M: Wolfgang Grandegger wg@grandegger.com - L: socketcan-core@lists.berlios.de (subscribers-only) + L: linux-can@vger.kernel.org L: netdev@vger.kernel.org W: http://developer.berlios.de/projects/socketcan/ S: Maintained @@@ -1798,13 -1767,13 +1806,13 @@@ M: Christian Benvenuti <benve@cisco.com M: Roopa Prabhu roprabhu@cisco.com M: David Wang dwang2@cisco.com S: Supported - F: drivers/net/enic/ + F: drivers/net/ethernet/cisco/enic/
CIRRUS LOGIC EP93XX ETHERNET DRIVER M: Hartley Sweeten hsweeten@visionengravers.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/arm/ep93xx_eth.c + F: drivers/net/ethernet/cirrus/ep93xx_eth.c
CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER M: Lennert Buytenhek kernel@wantstofly.org @@@ -1944,7 -1913,7 +1952,7 @@@ CPMAC ETHERNET DRIVE M: Florian Fainelli florian@openwrt.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/cpmac.c + F: drivers/net/ethernet/ti/cpmac.c
CPU FREQUENCY DRIVERS M: Dave Jones davej@redhat.com @@@ -2031,7 -2000,7 +2039,7 @@@ M: Divy Le Ray <divy@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb3/ + F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise swise@chelsio.com @@@ -2045,7 -2014,7 +2053,7 @@@ M: Dimitris Michailidis <dm@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb4/ + F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise swise@chelsio.com @@@ -2059,14 -2028,14 +2067,14 @@@ M: Casey Leedom <leedom@chelsio.com L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported - F: drivers/net/cxgb4vf/ + F: drivers/net/ethernet/chelsio/cxgb4vf/
STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro peppe.cavallaro@st.com L: netdev@vger.kernel.org W: http://www.stlinux.com S: Supported - F: drivers/net/stmmac/ + F: drivers/net/ethernet/stmicro/stmmac/
CYBERPRO FB DRIVER M: Russell King linux@arm.linux.org.uk @@@ -2110,7 -2079,7 +2118,7 @@@ DAVICOM FAST ETHERNET (DMFE) NETWORK DR L: netdev@vger.kernel.org S: Orphan F: Documentation/networking/dmfe.txt - F: drivers/net/tulip/dmfe.c + F: drivers/net/ethernet/tulip/dmfe.c
DC390/AM53C974 SCSI driver M: Kurt Garloff garloff@suse.de @@@ -2149,7 -2118,7 +2157,7 @@@ F: net/decnet DEFXX FDDI NETWORK DRIVER M: "Maciej W. Rozycki" macro@linux-mips.org S: Maintained - F: drivers/net/defxx.* + F: drivers/net/fddi/defxx.*
DELL LAPTOP DRIVER M: Matthew Garrett mjg59@srcf.ucam.org @@@ -2499,10 -2468,10 +2507,10 @@@ S: Supporte F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER -M: Breno Leitao leitao@linux.vnet.ibm.com +M: Thadeu Lima de Souza Cascardo cascardo@linux.vnet.ibm.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ehea/ + F: drivers/net/ethernet/ibm/ehea/
EMBEDDED LINUX M: Paul Gortmaker paul.gortmaker@windriver.com @@@ -2547,7 -2516,7 +2555,7 @@@ ETHEREXPRESS-16 NETWORK DRIVE M: Philip Blundell philb@gnu.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/eexpress.* + F: drivers/net/ethernet/i825xx/eexpress.*
ETHERNET BRIDGE M: Stephen Hemminger shemminger@linux-foundation.org @@@ -2561,7 -2530,7 +2569,7 @@@ F: net/bridge ETHERTEAM 16I DRIVER M: Mika Kuoppala miku@iki.fi S: Maintained - F: drivers/net/eth16i.c + F: drivers/net/ethernet/fujitsu/eth16i.c
EXT2 FILE SYSTEM M: Jan Kara jack@suse.cz @@@ -2725,7 -2694,7 +2733,7 @@@ M: Vitaly Bordug <vbordug@ru.mvista.com L: linuxppc-dev@lists.ozlabs.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/fs_enet/ + F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY @@@ -2747,7 -2716,7 +2755,7 @@@ M: Li Yang <leoli@freescale.com L: netdev@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained - F: drivers/net/ucc_geth* + F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@freescale.com @@@ -3085,6 -3054,7 +3093,7 @@@ S: Maintaine F: include/linux/hippidevice.h F: include/linux/if_hippi.h F: net/802/hippi.c + F: drivers/net/hippi/
HOST AP DRIVER M: Jouni Malinen j@w1.fi @@@ -3102,7 -3072,7 +3111,7 @@@ F: drivers/platform/x86/tc1100-wmi. HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series M: Jaroslav Kysela perex@perex.cz S: Maintained - F: drivers/net/hp100.* + F: drivers/net/ethernet/hp/hp100.*
HPET: High Precision Event Timers driver M: Clemens Ladisch clemens@ladisch.de @@@ -3180,7 -3150,8 +3189,7 @@@ IA64 (Itanium) PLATFOR M: Tony Luck tony.luck@intel.com M: Fenghua Yu fenghua.yu@intel.com L: linux-ia64@vger.kernel.org -W: http://www.ia64-linux.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git S: Maintained F: arch/ia64/
@@@ -3199,7 -3170,7 +3208,7 @@@ IBM Power Virtual Ethernet Device Drive M: Santiago Leon santil@linux.vnet.ibm.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/ibmveth.* + F: drivers/net/ethernet/ibm/ibmveth.*
IBM ServeRAID RAID DRIVER P: Jack Hammer @@@ -3351,7 -3322,7 +3360,7 @@@ M: David Woodhouse <dwmw2@infradead.org L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/iommu-2.6.git S: Supported -F: drivers/pci/intel-iommu.c +F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER @@@ -3366,7 -3337,7 +3375,7 @@@ F: arch/arm/mach-ixp4xx/include/mach/qm F: arch/arm/mach-ixp4xx/include/mach/npe.h F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c F: arch/arm/mach-ixp4xx/ixp4xx_npe.c - F: drivers/net/arm/ixp4xx_eth.c + F: drivers/net/ethernet/xscale/ixp4xx_eth.c F: drivers/net/wan/ixp4xx_hss.c
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT @@@ -3378,7 -3349,7 +3387,7 @@@ INTEL IXP2000 ETHERNET DRIVE M: Lennert Buytenhek kernel@wantstofly.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ixp2000/ + F: drivers/net/ethernet/xscale/ixp2000/
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) M: Jeff Kirsher jeffrey.t.kirsher@intel.com @@@ -3387,13 -3358,13 +3396,13 @@@ M: Bruce Allan <bruce.w.allan@intel.com M: Carolyn Wyborny carolyn.wyborny@intel.com M: Don Skidmore donald.c.skidmore@intel.com M: Greg Rose gregory.v.rose@intel.com - M: PJ Waskiewicz peter.p.waskiewicz.jr@intel.com + M: Peter P Waskiewicz Jr peter.p.waskiewicz.jr@intel.com M: Alex Duyck alexander.h.duyck@intel.com M: John Ronciak john.ronciak@intel.com L: e1000-devel@lists.sourceforge.net W: http://e1000.sourceforge.net/ - T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git - T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git S: Supported F: Documentation/networking/e100.txt F: Documentation/networking/e1000.txt @@@ -3403,14 -3374,7 +3412,7 @@@ F: Documentation/networking/igbvf.tx F: Documentation/networking/ixgb.txt F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt - F: drivers/net/e100.c - F: drivers/net/e1000/ - F: drivers/net/e1000e/ - F: drivers/net/igb/ - F: drivers/net/igbvf/ - F: drivers/net/ixgb/ - F: drivers/net/ixgbe/ - F: drivers/net/ixgbevf/ + F: drivers/net/ethernet/intel/
INTEL MRST PMU DRIVER M: Len Brown len.brown@intel.com @@@ -3462,7 -3426,7 +3464,7 @@@ M: Wey-Yi Guy <wey-yi.w.guy@intel.com M: Intel Linux Wireless ilw@linux.intel.com L: linux-wireless@vger.kernel.org W: http://intellinuxwireless.org - T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git S: Supported F: drivers/net/wireless/iwlwifi/
@@@ -3478,7 -3442,7 +3480,7 @@@ IOC3 ETHERNET DRIVE M: Ralf Baechle ralf@linux-mips.org L: linux-mips@linux-mips.org S: Maintained - F: drivers/net/ioc3-eth.c + F: drivers/net/ethernet/sgi/ioc3-eth.c
IOC3 SERIAL DRIVER M: Pat Gefre pfg@sgi.com @@@ -3496,7 -3460,7 +3498,7 @@@ M: Francois Romieu <romieu@fr.zoreil.co M: Sorbica Shieh sorbica@icplus.com.tw L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ipg.* + F: drivers/net/ethernet/icplus/ipg.*
IPATH DRIVER M: Mike Marciniszyn infinipath@qlogic.com @@@ -3644,7 -3608,7 +3646,7 @@@ JME NETWORK DRIVE M: Guo-Fu Tseng cooldavid@cooldavid.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/jme.* + F: drivers/net/ethernet/jme.*
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) M: David Woodhouse dwmw2@infradead.org @@@ -4066,7 -4030,6 +4068,7 @@@ F: fs/partitions/ldm.
LogFS M: Joern Engel joern@logfs.org +M: Prasad Joshi prasadjoshi.linux@gmail.com L: logfs@logfs.org W: logfs.org S: Maintained @@@ -4176,7 -4139,7 +4178,7 @@@ MARVELL MV643XX ETHERNET DRIVE M: Lennert Buytenhek buytenh@wantstofly.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/mv643xx_eth.* + F: drivers/net/ethernet/marvell/mv643xx_eth.* F: include/linux/mv643xx.h
MARVELL MWIFIEX WIRELESS DRIVER @@@ -4390,12 -4353,12 +4392,12 @@@ M: Andrew Gallatin <gallatin@myri.com L: netdev@vger.kernel.org W: http://www.myri.com/scs/download-Myri10GE.html S: Supported - F: drivers/net/myri10ge/ + F: drivers/net/ethernet/myricom/myri10ge/
NATSEMI ETHERNET DRIVER (DP8381x) M: Tim Hockin thockin@hockin.org S: Maintained - F: drivers/net/natsemi.c + F: drivers/net/ethernet/natsemi/natsemi.c
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER M: Daniel Mack zonque@gmail.com @@@ -4435,9 -4398,8 +4437,8 @@@ W: http://trac.neterion.com/cgi-bin/tra W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous S: Supported F: Documentation/networking/s2io.txt - F: drivers/net/s2io* F: Documentation/networking/vxge.txt - F: drivers/net/vxge/ + F: drivers/net/ethernet/neterion/
NETFILTER/IPTABLES/IPCHAINS P: Rusty Russell @@@ -4551,11 -4513,23 +4552,23 @@@ F: include/linux/if_ F: include/linux/*device.h
NETXEN (1/10) GbE SUPPORT - M: Amit Kumar Salecha amit.salecha@qlogic.com + M: Sony Chacko sony.chacko@qlogic.com + M: Rajesh Borundia rajesh.borundia@qlogic.com L: netdev@vger.kernel.org W: http://www.qlogic.com S: Supported - F: drivers/net/netxen/ + F: drivers/net/ethernet/qlogic/netxen/ + + NFC SUBSYSTEM + M: Lauro Ramos Venancio lauro.venancio@openbossa.org + M: Aloisio Almeida Jr aloisio.almeida@openbossa.org + M: Samuel Ortiz sameo@linux.intel.com + L: linux-wireless@vger.kernel.org + S: Maintained + F: net/nfc/ + F: include/linux/nfc.h + F: include/net/nfc/ + F: drivers/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS M: Trond Myklebust Trond.Myklebust@netapp.com @@@ -4576,7 -4550,7 +4589,7 @@@ M: Jan-Pascal van Best <janpascal@vanbe M: Andreas Mohr andi@lisas.de L: netdev@vger.kernel.org S: Maintained - F: drivers/net/ni5010.* + F: drivers/net/ethernet/racal/ni5010.*
NILFS2 FILESYSTEM M: KONISHI Ryusuke konishi.ryusuke@lab.ntt.co.jp @@@ -4842,7 -4816,7 +4855,7 @@@ PA SEMI ETHERNET DRIVE M: Olof Johansson olof@lixom.net L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pasemi_mac.* + F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER M: Olof Johansson olof@lixom.net @@@ -4989,7 -4963,7 +5002,7 @@@ PCNET32 NETWORK DRIVE M: Don Fry pcnet32@frontier.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pcnet32.c + F: drivers/net/ethernet/amd/pcnet32.c
PCRYPT PARALLEL CRYPTO ENGINE M: Steffen Klassert steffen.klassert@secunet.com @@@ -5121,7 -5095,7 +5134,7 @@@ PPP PROTOCOL DRIVERS AND COMPRESSOR M: Paul Mackerras paulus@samba.org L: linux-ppp@vger.kernel.org S: Maintained - F: drivers/net/ppp_* + F: drivers/net/ppp/ppp_*
PPP OVER ATM (RFC 2364) M: Mitchell Blank Jr mitch@sfgoth.com @@@ -5132,8 -5106,8 +5145,8 @@@ F: include/linux/atmppp. PPP OVER ETHERNET M: Michal Ostrowski mostrows@earthlink.net S: Maintained - F: drivers/net/pppoe.c - F: drivers/net/pppox.c + F: drivers/net/ppp/pppoe.c + F: drivers/net/ppp/pppox.c
PPP OVER L2TP M: James Chapman jchapman@katalix.com @@@ -5154,7 -5128,7 +5167,7 @@@ PPTP DRIVE M: Dmitry Kozlov xeb@mail.ru L: netdev@vger.kernel.org S: Maintained - F: drivers/net/pptp.c + F: drivers/net/ppp/pptp.c W: http://sourceforge.net/projects/accel-pptp
PREEMPTIBLE KERNEL @@@ -5183,7 -5157,7 +5196,7 @@@ M: Geoff Levand <geoff@infradead.org L: netdev@vger.kernel.org L: cbe-oss-dev@lists.ozlabs.org S: Maintained - F: drivers/net/ps3_gelic_net.* + F: drivers/net/ethernet/toshiba/ps3_gelic_net.*
PS3 PLATFORM SUPPORT M: Geoff Levand geoff@infradead.org @@@ -5301,23 -5275,24 +5314,24 @@@ M: linux-driver@qlogic.co L: netdev@vger.kernel.org S: Supported F: Documentation/networking/LICENSE.qla3xxx - F: drivers/net/qla3xxx.* + F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER - M: Amit Kumar Salecha amit.salecha@qlogic.com M: Anirban Chakraborty anirban.chakraborty@qlogic.com + M: Sony Chacko sony.chacko@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/qlcnic/ + F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER + M: Anirban Chakraborty anirban.chakraborty@qlogic.com M: Jitendra Kalsaria jitendra.kalsaria@qlogic.com M: Ron Mercer ron.mercer@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/qlge/ + F: drivers/net/ethernet/qlogic/qlge/
QNX4 FILESYSTEM M: Anders Larsen al@alarsen.net @@@ -5327,12 -5302,6 +5341,12 @@@ F: fs/qnx4 F: include/linux/qnx4_fs.h F: include/linux/qnxtypes.h
+QUALCOMM HEXAGON ARCHITECTURE +M: Richard Kuo rkuo@codeaurora.org +L: linux-hexagon@vger.kernel.org +S: Supported +F: arch/hexagon/ + RADOS BLOCK DEVICE (RBD) F: include/linux/qnxtypes.h M: Yehuda Sadeh yehuda@hq.newdream.net @@@ -5405,7 -5374,7 +5419,7 @@@ RDC R6040 FAST ETHERNET DRIVE M: Florian Fainelli florian@openwrt.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/r6040.c + F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS M: Andy Grover andy.grover@oracle.com @@@ -5809,7 -5778,7 +5823,7 @@@ M: Ajit Khaparde <ajit.khaparde@emulex. L: netdev@vger.kernel.org W: http://www.emulex.com S: Supported - F: drivers/net/benet/ + F: drivers/net/ethernet/emulex/benet/
SFC NETWORK DRIVER M: Solarflare linux maintainers linux-net-drivers@solarflare.com @@@ -5817,7 -5786,7 +5831,7 @@@ M: Steve Hodgson <shodgson@solarflare.c M: Ben Hutchings bhutchings@solarflare.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/sfc/ + F: drivers/net/ethernet/sfc/
SGI GRU DRIVER M: Jack Steiner steiner@sgi.com @@@ -5883,14 -5852,14 +5897,14 @@@ SIS 190 ETHERNET DRIVE M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sis190.c + F: drivers/net/ethernet/sis/sis190.c
SIS 900/7016 FAST ETHERNET DRIVER M: Daniele Venzano venza@brownhat.org W: http://www.brownhat.org/sis900.html L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sis900.* + F: drivers/net/ethernet/sis/sis900.*
SIS 96X I2C/SMBUS DRIVER M: "Mark M. Hoffman" mhoffman@lightlink.com @@@ -5917,8 -5886,7 +5931,7 @@@ SKGE, SKY2 10/100/1000 GIGABIT ETHERNE M: Stephen Hemminger shemminger@linux-foundation.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/skge.* - F: drivers/net/sky2.* + F: drivers/net/ethernet/marvell/sk*
SLAB ALLOCATOR M: Christoph Lameter cl@linux-foundation.org @@@ -5932,7 -5900,7 +5945,7 @@@ F: mm/sl?b. SMC91x ETHERNET DRIVER M: Nicolas Pitre nico@fluxnic.net S: Odd Fixes - F: drivers/net/smc91x.* + F: drivers/net/ethernet/smsc/smc91x.*
SMM665 HARDWARE MONITOR DRIVER M: Guenter Roeck linux@roeck-us.net @@@ -5967,13 -5935,13 +5980,13 @@@ M: Steve Glendinning <steve.glendinning L: netdev@vger.kernel.org S: Supported F: include/linux/smsc911x.h - F: drivers/net/smsc911x.* + F: drivers/net/ethernet/smsc/smsc911x.*
SMSC9420 PCI ETHERNET DRIVER M: Steve Glendinning steve.glendinning@smsc.com L: netdev@vger.kernel.org S: Supported - F: drivers/net/smsc9420.* + F: drivers/net/ethernet/smsc/smsc9420.*
SN-IA64 (Itanium) SUB-PLATFORM M: Jes Sorensen jes@sgi.com @@@ -6007,7 -5975,7 +6020,7 @@@ SONIC NETWORK DRIVE M: Thomas Bogendoerfer tsbogend@alpha.franken.de L: netdev@vger.kernel.org S: Maintained - F: drivers/net/sonic.* + F: drivers/net/ethernet/natsemi/sonic.*
SONICS SILICON BACKPLANE DRIVER (SSB) M: Michael Buesch m@bues.ch @@@ -6148,7 -6116,7 +6161,7 @@@ M: Jens Osterkamp <jens@de.ibm.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/spider_net.txt - F: drivers/net/spider_net* + F: drivers/net/ethernet/toshiba/spider_net*
SPU FILE SYSTEM M: Jeremy Kerr jk@ozlabs.org @@@ -6195,12 -6163,6 +6208,6 @@@ M: Jakub Schmidtke <sjakub@gmail.com S: Odd Fixes F: drivers/staging/asus_oled/
- STAGING - ATHEROS ATH6KL WIRELESS DRIVER - M: Luis R. Rodriguez mcgrof@gmail.com - M: Naveen Singh nsingh@atheros.com - S: Odd Fixes - F: drivers/staging/ath6kl/ - STAGING - COMEDI M: Ian Abbott abbotti@mev.co.uk M: Mori Hess fmhess@users.sourceforge.net @@@ -6326,7 -6288,7 +6333,7 @@@ F: drivers/staging/xgifb STARFIRE/DURALAN NETWORK DRIVER M: Ion Badulescu ionut@badula.org S: Odd Fixes - F: drivers/net/starfire* + F: drivers/net/ethernet/adaptec/starfire*
SUN3/3X M: Sam Creasey sammy@sammy.net @@@ -6335,6 -6297,7 +6342,7 @@@ S: Maintaine F: arch/m68k/kernel/*sun3* F: arch/m68k/sun3*/ F: arch/m68k/include/asm/sun3* + F: drivers/net/ethernet/i825xx/sun3*
SUPERH M: Paul Mundt lethal@linux-sh.org @@@ -6411,10 -6374,10 +6419,10 @@@ F: net/ipv4/tcp_lp.
TEGRA SUPPORT M: Colin Cross ccross@android.com -M: Erik Gilling konkers@android.com M: Olof Johansson olof@lixom.net +M: Stephen Warren swarren@nvidia.com L: linux-tegra@vger.kernel.org -T: git git://android.git.kernel.org/kernel/tegra.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git S: Supported F: arch/arm/mach-tegra
@@@ -6422,7 -6385,7 +6430,7 @@@ TEHUTI ETHERNET DRIVE M: Andy Gospodarek andy@greyhouse.net L: netdev@vger.kernel.org S: Supported - F: drivers/net/tehuti* + F: drivers/net/ethernet/tehuti/*
Telecom Clock Driver for MCPL0010 M: Mark Gross mark.gross@intel.com @@@ -6473,7 -6436,7 +6481,7 @@@ W: http://www.tilera.com/scm S: Supported F: arch/tile/ F: drivers/tty/hvc/hvc_tile.c - F: drivers/net/tile/ + F: drivers/net/ethernet/tile/ F: drivers/edac/tile_edac.c
TLAN NETWORK DRIVER @@@ -6482,7 -6445,7 +6490,7 @@@ L: tlan-devel@lists.sourceforge.net (su W: http://sourceforge.net/projects/tlan/ S: Maintained F: Documentation/networking/tlan.txt - F: drivers/net/tlan.* + F: drivers/net/ethernet/ti/tlan.*
TOMOYO SECURITY MODULE M: Kentaro Takeda takedakn@nttdata.co.jp @@@ -6576,7 -6539,7 +6584,7 @@@ TULIP NETWORK DRIVER M: Grant Grundler grundler@parisc-linux.org L: netdev@vger.kernel.org S: Maintained - F: drivers/net/tulip/ + F: drivers/net/ethernet/tulip/
TUN/TAP driver M: Maxim Krasnyansky maxk@qualcomm.com @@@ -6622,10 -6585,11 +6630,10 @@@ W: http://uclinux-h8.sourceforge.jp S: Supported F: arch/h8300/ F: drivers/ide/ide-h8300.c - F: drivers/net/ne-h8300.c + F: drivers/net/ethernet/8390/ne-h8300.c
UDF FILESYSTEM M: Jan Kara jack@suse.cz -W: http://linux-udf.sourceforge.net S: Maintained F: Documentation/filesystems/udf.txt F: fs/udf/ @@@ -7049,7 -7013,7 +7057,7 @@@ F: include/linux/vhost. VIA RHINE NETWORK DRIVER M: Roger Luethi rl@hellgate.ch S: Maintained - F: drivers/net/via-rhine.c + F: drivers/net/ethernet/via/via-rhine.c
VIAPRO SMBUS DRIVER M: Jean Delvare khali@linux-fr.org @@@ -7077,7 -7041,7 +7085,7 @@@ VIA VELOCITY NETWORK DRIVE M: Francois Romieu romieu@fr.zoreil.com L: netdev@vger.kernel.org S: Maintained - F: drivers/net/via-velocity.* + F: drivers/net/ethernet/via/via-velocity.*
VLAN (802.1Q) M: Patrick McHardy kaber@trash.net @@@ -7186,12 -7150,6 +7194,12 @@@ L: linux-scsi@vger.kernel.or S: Maintained F: drivers/scsi/wd7000.c
+WIIMOTE HID DRIVER +M: David Herrmann dh.herrmann@googlemail.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-wiimote* + WINBOND CIR DRIVER M: David Härdeman david@hardeman.nu S: Maintained diff --combined arch/mips/Kconfig index a926484,0dbb4ed..62b9677 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@@ -24,7 -24,6 +24,7 @@@ config MIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select HAVE_ARCH_JUMP_LABEL + select IRQ_FORCED_THREADING
menu "Machine selection"
@@@ -47,8 -46,6 +47,8 @@@ config MIPS_ALCHEM select GENERIC_GPIO select ARCH_WANT_OPTIONAL_GPIOLIB select SYS_SUPPORTS_ZBOOT + select USB_ARCH_HAS_OHCI + select USB_ARCH_HAS_EHCI
config AR7 bool "Texas Instruments AR7" @@@ -94,15 -91,8 +94,8 @@@ config BCM47X select DMA_NONCOHERENT select HW_HAS_PCI select IRQ_CPU - select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select SSB - select SSB_DRIVER_MIPS - select SSB_DRIVER_EXTIF - select SSB_EMBEDDED - select SSB_B43_PCI_BRIDGE if PCI - select SSB_PCICORE_HOSTMODE if PCI select GENERIC_GPIO select SYS_HAS_EARLY_PRINTK select CFE @@@ -215,7 -205,6 +208,7 @@@ config MACH_JZ474 select SYS_HAS_EARLY_PRINTK select HAVE_PWM select HAVE_CLK + select GENERIC_IRQ_CHIP
config LANTIQ bool "Lantiq based platforms" @@@ -726,7 -715,6 +719,7 @@@ config CAVIUM_OCTEON_SIMULATO select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_HOTPLUG_CPU select SYS_HAS_CPU_CAVIUM_OCTEON + select HOLES_IN_ZONE help The Octeon simulator is software performance model of the Cavium Octeon Processor. It supports simulating Octeon processors on x86 @@@ -749,7 -737,6 +742,7 @@@ config CAVIUM_OCTEON_REFERENCE_BOAR select ZONE_DMA32 select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + select HOLES_IN_ZONE help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@@ -794,6 -781,7 +787,7 @@@ endchoic
source "arch/mips/alchemy/Kconfig" source "arch/mips/ath79/Kconfig" + source "arch/mips/bcm47xx/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/jazz/Kconfig" source "arch/mips/jz4740/Kconfig" @@@ -979,9 -967,6 +973,9 @@@ config ISA_DMA_AP config GENERIC_GPIO bool
+config HOLES_IN_ZONE + bool + # # Endianess selection. Sufficiently obscure so many users don't know what to # answer,so we try hard to limit the available choices. Also the use of a @@@ -2101,7 -2086,7 +2095,7 @@@ config NODES_SHIF
config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && CPU_MIPS32 + depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON) default y help Enable hardware performance counter support for perf events. If diff --combined arch/powerpc/Kconfig index 8523bd1,47682b6..85195e4 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@@ -323,7 -323,7 +323,7 @@@ config SWIOTL
config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" - depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC) + depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC || PPC_POWERNV) ---help--- Say Y here to be able to disable and re-enable individual CPUs at runtime on SMP machines. @@@ -345,7 -345,7 +345,7 @@@ config ARCH_ENABLE_MEMORY_HOTREMOV
config KEXEC bool "kexec system call (EXPERIMENTAL)" - depends on (PPC_BOOK3S || FSL_BOOKE) && EXPERIMENTAL + depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@@ -429,7 -429,8 +429,7 @@@ config ARCH_POPULATES_NODE_MA def_bool y
config SYS_SUPPORTS_HUGETLBFS - def_bool y - depends on PPC_BOOK3S_64 + bool
source "mm/Kconfig"
@@@ -655,6 -656,8 +655,8 @@@ config SBU
config FSL_SOC bool + select HAVE_CAN_FLEXCAN if NET && CAN + select PPC_CLOCK if CAN_FLEXCAN
config FSL_PCI bool diff --combined arch/powerpc/configs/ppc40x_defconfig index e9d920c,7cb703b..1eb19ac --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@@ -14,6 -14,7 +14,6 @@@ CONFIG_MODULE_UNLOAD= CONFIG_PPC4xx_GPIO=y CONFIG_ACADIA=y CONFIG_EP405=y -CONFIG_HCU4=y CONFIG_HOTFOOT=y CONFIG_KILAUEA=y CONFIG_MAKALU=y @@@ -49,8 -50,9 +49,9 @@@ CONFIG_BLK_DEV_RAM= CONFIG_BLK_DEV_RAM_SIZE=35000 CONFIG_XILINX_SYSACE=m CONFIG_NETDEVICES=y - CONFIG_NET_ETHERNET=y - CONFIG_IBM_NEW_EMAC=y + CONFIG_ETHERNET=y + CONFIG_NET_VENDOR_IBM=y + CONFIG_IBM_EMAC=y # CONFIG_INPUT is not set CONFIG_SERIO=m # CONFIG_SERIO_I8042 is not set diff --combined arch/powerpc/platforms/40x/Kconfig index ae5e0bf,b5d8706..8f9c3e2 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@@ -32,6 -32,14 +32,6 @@@ config EP40 help This option enables support for the EP405/EP405PC boards.
-config HCU4 - bool "Hcu4" - depends on 40x - default n - select 405GPR - help - This option enables support for the Nestal Maschinen HCU4 board. - config HOTFOOT bool "Hotfoot" depends on 40x @@@ -122,21 -130,21 +122,21 @@@ config 405G bool select IBM405_ERR77 select IBM405_ERR51 - select IBM_NEW_EMAC_ZMII + select IBM_EMAC_ZMII
config 405EP bool
config 405EX bool - select IBM_NEW_EMAC_EMAC4 - select IBM_NEW_EMAC_RGMII + select IBM_EMAC_EMAC4 + select IBM_EMAC_RGMII
config 405EZ bool - select IBM_NEW_EMAC_NO_FLOW_CTRL - select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT - select IBM_NEW_EMAC_MAL_COMMON_ERR + select IBM_EMAC_NO_FLOW_CTRL + select IBM_EMAC_MAL_CLR_ICINTSTAT + select IBM_EMAC_MAL_COMMON_ERR
config 405GPR bool diff --combined arch/s390/include/asm/qdio.h index cbbf7d8,2199362..e63d13d --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@@ -46,8 -46,6 +46,8 @@@ struct qdesfmt0 u32 : 16; } __attribute__ ((packed));
+#define QDR_AC_MULTI_BUFFER_ENABLE 0x01 + /** * struct qdr - queue description record (QDR) * @qfmt: queue format @@@ -125,6 -123,40 +125,40 @@@ struct slibe };
/** + * struct qaob - queue asynchronous operation block + * @res0: reserved parameters + * @res1: reserved parameter + * @res2: reserved parameter + * @res3: reserved parameter + * @aorc: asynchronous operation return code + * @flags: internal flags + * @cbtbs: control block type + * @sb_count: number of storage blocks + * @sba: storage block element addresses + * @dcount: size of storage block elements + * @user0: user defineable value + * @res4: reserved paramater + * @user1: user defineable value + * @user2: user defineable value + */ + struct qaob { + u64 res0[6]; + u8 res1; + u8 res2; + u8 res3; + u8 aorc; + u8 flags; + u16 cbtbs; + u8 sb_count; + u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER]; + u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER]; + u64 user0; + u64 res4[2]; + u64 user1; + u64 user2; + } __attribute__ ((packed, aligned(256))); + + /** * struct slib - storage list information block (SLIB) * @nsliba: next SLIB address (if any) * @sla: SL address @@@ -224,11 -256,44 +258,46 @@@ struct slsb u8 val[QDIO_MAX_BUFFERS_PER_Q]; } __attribute__ ((packed, aligned(256)));
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080 +#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040 #define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 #define CHSC_AC2_DATA_DIV_ENABLED 0x0002
+ /** + * struct qdio_outbuf_state - SBAL related asynchronous operation information + * (for communication with upper layer programs) + * (only required for use with completion queues) + * @flags: flags indicating state of buffer + * @aob: pointer to QAOB used for the particular SBAL + * @user: pointer to upper layer program's state information related to SBAL + * (stored in user1 data of QAOB) + */ + struct qdio_outbuf_state { + u8 flags; + struct qaob *aob; + void *user; + }; + + #define QDIO_OUTBUF_STATE_FLAG_NONE 0x00 + #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 + + #define CHSC_AC1_INITIATE_INPUTQ 0x80 + + + /* qdio adapter-characteristics-1 flag */ + #define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ + #define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ + #define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ + #define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ + #define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ + #define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ + #define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ + + #define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 + #define CHSC_AC2_DATA_DIV_ENABLED 0x0002 + + #define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000 + struct qdio_ssqd_desc { u8 flags; u8:8; @@@ -247,8 -312,7 +316,7 @@@ u64 sch_token; u8 mro; u8 mri; - u8:8; - u8 sbalic; + u16 qdioac3; u16:16; u8:8; u8 mmwc; @@@ -284,14 -348,15 +352,16 @@@ typedef void qdio_handler_t(struct ccw_ * @no_output_qs: number of output queues * @input_handler: handler to be called for input queues * @output_handler: handler to be called for output queues + * @queue_start_poll: polling handlers (one per input queue or NULL) * @int_parm: interruption parameter * @input_sbal_addr_array: address of no_input_qs * 128 pointers * @output_sbal_addr_array: address of no_output_qs * 128 pointers + * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL) */ struct qdio_initialize { struct ccw_device *cdev; unsigned char q_format; + unsigned char qdr_ac; unsigned char adapter_name[8]; unsigned int qib_param_field_format; unsigned char *qib_param_field; @@@ -302,11 -367,12 +372,12 @@@ unsigned int no_output_qs; qdio_handler_t *input_handler; qdio_handler_t *output_handler; - void (*queue_start_poll) (struct ccw_device *, int, unsigned long); + void (**queue_start_poll) (struct ccw_device *, int, unsigned long); int scan_threshold; unsigned long int_parm; void **input_sbal_addr_array; void **output_sbal_addr_array; + struct qdio_outbuf_state *output_sbal_state_array; };
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ @@@ -321,6 -387,7 +392,7 @@@ extern int qdio_allocate(struct qdio_initialize *); extern int qdio_establish(struct qdio_initialize *); extern int qdio_activate(struct ccw_device *); + extern void qdio_release_aob(struct qaob *); extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int, unsigned int); extern int qdio_start_irq(struct ccw_device *, int); diff --combined drivers/infiniband/hw/nes/nes_nic.c index 64f91d8,47b2ee4..c00d2f3 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@@ -441,13 -441,13 +441,13 @@@ static int nes_nic_send(struct sk_buff nesnic->tx_skb[nesnic->sq_head] = skb; for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags; skb_fragment_index++) { - bus_address = pci_map_page( nesdev->pcidev, - skb_shinfo(skb)->frags[skb_fragment_index].page, - skb_shinfo(skb)->frags[skb_fragment_index].page_offset, - skb_shinfo(skb)->frags[skb_fragment_index].size, - PCI_DMA_TODEVICE); + skb_frag_t *frag = + &skb_shinfo(skb)->frags[skb_fragment_index]; + bus_address = skb_frag_dma_map(&nesdev->pcidev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); wqe_fragment_length[wqe_fragment_index] = - cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size); + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index])); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), bus_address); wqe_fragment_index++; @@@ -561,11 -561,12 +561,12 @@@ tso_sq_no_longer_full /* Map all the buffers */ for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags; tso_frag_count++) { - tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev, - skb_shinfo(skb)->frags[tso_frag_count].page, - skb_shinfo(skb)->frags[tso_frag_count].page_offset, - skb_shinfo(skb)->frags[tso_frag_count].size, - PCI_DMA_TODEVICE); + skb_frag_t *frag = + &skb_shinfo(skb)->frags[tso_frag_count]; + tso_bus_address[tso_frag_count] = + skb_frag_dma_map(&nesdev->pcidev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); }
tso_frag_index = 0; @@@ -636,11 -637,11 +637,11 @@@ } while (wqe_fragment_index < 5) { wqe_fragment_length[wqe_fragment_index] = - cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size); + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index])); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), (u64)tso_bus_address[tso_frag_index]); wqe_fragment_index++; - tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size; + tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]); if (wqe_fragment_index < 5) wqe_fragment_length[wqe_fragment_index] = 0; if (tso_frag_index == tso_frag_count) @@@ -1090,8 -1091,6 +1091,8 @@@ static const char nes_ethtool_stringset "LRO aggregated", "LRO flushed", "LRO no_desc", + "PAU CreateQPs", + "PAU DestroyQPs", }; #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
@@@ -1307,8 -1306,6 +1308,8 @@@ static void nes_netdev_get_ethtool_stat target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; + target_stat_values[++index] = atomic_read(&pau_qps_created); + target_stat_values[++index] = atomic_read(&pau_qps_destroyed); }
/** @@@ -1642,7 -1639,7 +1643,7 @@@ static const struct net_device_ops nes_ .ndo_get_stats = nes_netdev_get_stats, .ndo_tx_timeout = nes_netdev_tx_timeout, .ndo_set_mac_address = nes_netdev_set_mac_address, - .ndo_set_multicast_list = nes_netdev_set_multicast_list, + .ndo_set_rx_mode = nes_netdev_set_multicast_list, .ndo_change_mtu = nes_netdev_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = nes_fix_features, diff --combined drivers/infiniband/ulp/ipoib/ipoib_cm.c index ef003cc,c74548a..57b4f9a --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@@ -169,7 -169,7 +169,7 @@@ static struct sk_buff *ipoib_cm_alloc_r goto partial_error; skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
- mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page, + mapping[i + 1] = ib_dma_map_page(priv->ca, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) goto partial_error; @@@ -537,12 -537,13 +537,13 @@@ static void skb_put_frags(struct sk_buf
if (length == 0) { /* don't need this page */ - skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE); + skb_fill_page_desc(toskb, i, skb_frag_page(frag), + 0, PAGE_SIZE); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size; + skb_frag_size_set(frag, size); skb->data_len += size; skb->truesize += size; skb->len += size; @@@ -1496,7 -1497,6 +1497,7 @@@ static void ipoib_cm_create_srq(struct { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_srq_init_attr srq_init_attr = { + .srq_type = IB_SRQT_BASIC, .attr = { .max_wr = ipoib_recvq_size, .max_sge = max_sge diff --combined drivers/net/bonding/bond_main.c index de3d351,41430ba..71efff3 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -557,7 -557,7 +557,7 @@@ down static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; + struct ethtool_cmd ecmd; u32 slave_speed; int res;
@@@ -565,18 -565,15 +565,15 @@@ slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL;
- if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) - return -1; - - res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); + res = __ethtool_get_settings(slave_dev, &ecmd); if (res < 0) return -1;
- slave_speed = ethtool_cmd_speed(&etool); + slave_speed = ethtool_cmd_speed(&ecmd); if (slave_speed == 0 || slave_speed == ((__u32) -1)) return -1;
- switch (etool.duplex) { + switch (ecmd.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; @@@ -585,7 -582,7 +582,7 @@@ }
slave->speed = slave_speed; - slave->duplex = etool.duplex; + slave->duplex = ecmd.duplex;
return 0; } @@@ -1435,8 -1432,6 +1432,8 @@@ static rx_handler_result_t bond_handle_ struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; + void (*recv_probe)(struct sk_buff *, struct bonding *, + struct slave *);
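
The hunk above switches bond_update_speed_duplex() from calling a slave's ethtool_ops->get_settings() directly to the generic __ethtool_get_settings() helper. As a rough, standalone illustration of that calling pattern (this sketch is not part of the patch; the function name example_link_speed is invented):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative only: query a device's link speed the way the bonding
 * hunk above now does.  __ethtool_get_settings() performs the
 * ethtool_ops presence checks itself, which is why the explicit NULL
 * tests could be dropped from the driver code. */
static int example_link_speed(struct net_device *dev, u32 *speed)
{
	struct ethtool_cmd ecmd;
	int err;

	err = __ethtool_get_settings(dev, &ecmd);
	if (err < 0)
		return err;

	*speed = ethtool_cmd_speed(&ecmd);
	if (*speed == 0 || *speed == (u32)-1)
		return -EINVAL;

	return 0;
}
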
skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@@ -1450,12 -1445,11 +1447,12 @@@ if (bond->params.arp_interval) slave->dev->last_rx = jiffies;
- if (bond->recv_probe) { + recv_probe = ACCESS_ONCE(bond->recv_probe); + if (recv_probe) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) { - bond->recv_probe(nskb, bond, slave); + recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); } } @@@ -3710,44 -3704,27 +3707,27 @@@ static bool bond_addr_in_mc_list(unsign return false; }
- static void bond_set_multicast_list(struct net_device *bond_dev) + static void bond_change_rx_flags(struct net_device *bond_dev, int change) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha; - bool found;
- /* - * Do promisc before checking multicast_mode - */ - if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC)) - /* - * FIXME: Need to handle the error when one of the multi-slaves - * encounters error. - */ - bond_set_promiscuity(bond, 1); - - - if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC)) - bond_set_promiscuity(bond, -1); - - - /* set allmulti flag to slaves */ - if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI)) - /* - * FIXME: Need to handle the error when one of the multi-slaves - * encounters error. - */ - bond_set_allmulti(bond, 1); + if (change & IFF_PROMISC) + bond_set_promiscuity(bond, + bond_dev->flags & IFF_PROMISC ? 1 : -1);
+ if (change & IFF_ALLMULTI) + bond_set_allmulti(bond, + bond_dev->flags & IFF_ALLMULTI ? 1 : -1); + }
- if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) - bond_set_allmulti(bond, -1); - + static void bond_set_multicast_list(struct net_device *bond_dev) + { + struct bonding *bond = netdev_priv(bond_dev); + struct netdev_hw_addr *ha; + bool found;
read_lock(&bond->lock);
- bond->flags = bond_dev->flags; - /* looking for addresses to add to slaves' mc list */ netdev_for_each_mc_addr(ha, bond_dev) { found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, @@@ -4306,7 -4283,8 +4286,8 @@@ static const struct net_device_ops bond .ndo_select_queue = bond_select_queue, .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, - .ndo_set_multicast_list = bond_set_multicast_list, + .ndo_change_rx_flags = bond_change_rx_flags, + .ndo_set_rx_mode = bond_set_multicast_list, .ndo_change_mtu = bond_change_mtu, .ndo_set_mac_address = bond_set_mac_address, .ndo_neigh_setup = bond_neigh_setup, @@@ -4852,11 -4830,20 +4833,20 @@@ static int bond_validate(struct nlattr return 0; }
+ static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], + unsigned int *num_queues, + unsigned int *real_num_queues) + { + *num_queues = tx_queues; + return 0; + } + static struct rtnl_link_ops bond_link_ops __read_mostly = { .kind = "bond", .priv_size = sizeof(struct bonding), .setup = bond_setup, .validate = bond_validate, + .get_tx_queues = bond_get_tx_queues, };
/* Create a new bond based on the specified name and bonding parameters. @@@ -4901,6 -4888,7 +4891,7 @@@ static int __net_init bond_net_init(str INIT_LIST_HEAD(&bn->dev_list);
bond_create_proc_dir(bn); + bond_create_sysfs(bn); return 0; } @@@ -4909,6 -4897,7 +4900,7 @@@ static void __net_exit bond_net_exit(st { struct bond_net *bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); }
@@@ -4946,10 -4935,6 +4938,6 @@@ static int __init bonding_init(void goto err; }
- res = bond_create_sysfs(); - if (res) - goto err; - register_netdevice_notifier(&bond_netdev_notifier); register_inetaddr_notifier(&bond_inetaddr_notifier); out: @@@ -4967,7 -4952,6 +4955,6 @@@ static void __exit bonding_exit(void unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_destroy_sysfs(); bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops); diff --combined drivers/net/can/mscan/mscan.c index 4cc6f44,ac42f5d..ec4a311 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@@ -62,7 -62,7 +62,7 @@@ static enum can_state state_map[] = static int mscan_set_mode(struct net_device *dev, u8 mode) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; int ret = 0; int i; u8 canctl1; @@@ -138,7 -138,7 +138,7 @@@ static int mscan_start(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u8 canrflg; int err;
@@@ -178,7 -178,7 +178,7 @@@ static int mscan_restart(struct net_dev struct mscan_priv *priv = netdev_priv(dev);
if (priv->type == MSCAN_TYPE_MPC5121) { - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
priv->can.state = CAN_STATE_ERROR_ACTIVE; WARN(!(in_8(®s->canmisc) & MSCAN_BOHOLD), @@@ -199,7 -199,7 +199,7 @@@ static netdev_tx_t mscan_start_xmit(str { struct can_frame *frame = (struct can_frame *)skb->data; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; int i, rtr, buf_id; u32 can_id;
@@@ -261,13 -261,11 +261,13 @@@ void __iomem *data = ®s->tx.dsr1_0; u16 *payload = (u16 *)frame->data;
- /* It is safe to write into dsr[dlc+1] */ - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* write remaining byte if necessary */ + if (frame->can_dlc & 1) + out_8(data, frame->data[frame->can_dlc - 1]); }
out_8(®s->tx.dlr, frame->can_dlc); @@@ -307,7 -305,7 +307,7 @@@ static enum can_state check_set_state(s static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u32 can_id; int i;
@@@ -332,13 -330,10 +332,13 @@@ void __iomem *data = ®s->rx.dsr1_0; u16 *payload = (u16 *)frame->data;
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* read remaining byte if necessary */ + if (frame->can_dlc & 1) + frame->data[frame->can_dlc - 1] = in_8(data); }
out_8(®s->canrflg, MSCAN_RXF); @@@ -348,7 -343,7 +348,7 @@@ static void mscan_get_err_frame(struct u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; enum can_state old_state;
@@@ -411,7 -406,7 +411,7 @@@ static int mscan_rx_poll(struct napi_st { struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); struct net_device *dev = napi->dev; - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; int npackets = 0; int ret = 1; @@@ -458,7 -453,7 +458,7 @@@ static irqreturn_t mscan_isr(int irq, v { struct net_device *dev = (struct net_device *)dev_id; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; u8 cantier, cantflg, canrflg; irqreturn_t ret = IRQ_NONE; @@@ -542,7 -537,7 +542,7 @@@ static int mscan_do_set_mode(struct net static int mscan_do_set_bittiming(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1;
@@@ -564,7 -559,7 +564,7 @@@ static int mscan_open(struct net_devic { int ret; struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
/* common open */ ret = open_candev(dev); @@@ -603,7 -598,7 +603,7 @@@ exit_napi_disable static int mscan_close(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base;
netif_stop_queue(dev); napi_disable(&priv->napi); @@@ -627,7 -622,7 +627,7 @@@ static const struct net_device_ops msca int register_mscandev(struct net_device *dev, int mscan_clksrc) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; u8 ctl1;
ctl1 = in_8(®s->canctl1); @@@ -664,7 -659,7 +664,7 @@@ void unregister_mscandev(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct mscan_regs __iomem *regs = priv->reg_base; mscan_set_mode(dev, MSCAN_INIT_MODE); clrbits8(®s->canctl1, MSCAN_CANE); unregister_candev(dev); diff --combined drivers/net/cris/eth_v10.c index 5c939ce,7cb2785..c7e1df9 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@@ -261,7 -261,7 +261,7 @@@ static const struct net_device_ops e100 .ndo_start_xmit = e100_send_packet, .ndo_tx_timeout = e100_tx_timeout, .ndo_get_stats = e100_get_stats, - .ndo_set_multicast_list = set_multicast_list, + .ndo_set_rx_mode = set_multicast_list, .ndo_do_ioctl = e100_ioctl, .ndo_set_mac_address = e100_set_mac_address, .ndo_validate_addr = eth_validate_addr, @@@ -1132,6 -1132,7 +1132,6 @@@ static irqreturn_ e100rxtx_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct net_local *np = netdev_priv(dev); unsigned long irqbits;
/* diff --combined drivers/net/ethernet/amd/au1000_eth.c index 0000000,8238667..4865ff1 mode 000000,100644..100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@@ -1,0 -1,1332 +1,1356 @@@ + /* + * + * Alchemy Au1x00 ethernet driver + * + * Copyright 2001-2003, 2006 MontaVista Software Inc. + * Copyright 2002 TimeSys Corp. + * Added ethtool/mii-tool support, + * Copyright 2004 Matt Porter mporter@kernel.crashing.org + * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de + * or riemer@riemer-nt.de: fixed the link beat detection with + * ioctls (SIOCGMIIPHY) + * Copyright 2006 Herbert Valerio Riedel hvr@gnu.org + * converted to use linux-2.6.x's PHY framework + * + * Author: MontaVista Software, Inc. + * ppopov@mvista.com or source@mvista.com + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * ######################################################################## + * + * + */ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/capability.h> + #include <linux/dma-mapping.h> + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/string.h> + #include <linux/timer.h> + #include <linux/errno.h> + #include <linux/in.h> + #include <linux/ioport.h> + #include <linux/bitops.h> + #include <linux/slab.h> + #include <linux/interrupt.h> + #include <linux/init.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/skbuff.h> + #include <linux/delay.h> + #include <linux/crc32.h> + #include <linux/phy.h> + #include <linux/platform_device.h> + #include <linux/cpu.h> + #include <linux/io.h> + + #include <asm/mipsregs.h> + #include <asm/irq.h> + #include <asm/processor.h> + + #include <au1000.h> + #include <au1xxx_eth.h> + #include <prom.h> + + #include "au1000_eth.h" + + #ifdef AU1000_ETH_DEBUG + static int au1000_debug = 5; + #else + static int au1000_debug = 3; + #endif + + #define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + + #define DRV_NAME "au1000_eth" + #define DRV_VERSION "1.7" + #define DRV_AUTHOR "Pete Popov ppopov@embeddedalley.com" + #define DRV_DESC "Au1xxx on-chip Ethernet driver" + + MODULE_AUTHOR(DRV_AUTHOR); + MODULE_DESCRIPTION(DRV_DESC); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + + /* + * Theory of operation + * + * The Au1000 MACs use a simple rx and tx descriptor ring scheme. + * There are four receive and four transmit descriptors. These + * descriptors are not in memory; rather, they are just a set of + * hardware registers. + * + * Since the Au1000 has a coherent data cache, the receive and + * transmit buffers are allocated from the KSEG0 segment. 
The + * hardware registers, however, are still mapped at KSEG1 to + * make sure there's no out-of-order writes, and that all writes + * complete immediately. + */ + + /* + * board-specific configurations + * + * PHY detection algorithm + * + * If phy_static_config is undefined, the PHY setup is + * autodetected: + * + * mii_probe() first searches the current MAC's MII bus for a PHY, + * selecting the first (or last, if phy_search_highest_addr is + * defined) PHY address not already claimed by another netdev. + * + * If nothing was found that way when searching for the 2nd ethernet + * controller's PHY and phy1_search_mac0 is defined, then + * the first MII bus is searched as well for an unclaimed PHY; this is + * needed in case of a dual-PHY accessible only through the MAC0's MII + * bus. + * + * Finally, if no PHY is found, then the corresponding ethernet + * controller is not registered to the network subsystem. + */ + + /* autodetection defaults: phy1_search_mac0 */ + + /* static PHY setup + * + * most boards PHY setup should be detectable properly with the + * autodetection algorithm in mii_probe(), but in some cases (e.g. if + * you have a switch attached, or want to use the PHY's interrupt + * notification capabilities) you can provide a static PHY + * configuration here + * + * IRQs may only be set, if a PHY address was configured + * If a PHY address is given, also a bus id is required to be set + * + * ps: make sure the used irqs are configured properly in the board + * specific irq-map + */ + + static void au1000_enable_mac(struct net_device *dev, int force_reset) + { + unsigned long flags; + struct au1000_private *aup = netdev_priv(dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (force_reset || (!aup->mac_enabled)) { + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 + | MAC_EN_CLOCK_ENABLE), aup->enable); + au_sync_delay(2); + + aup->mac_enabled = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + } + + /* + * MII operations + */ + static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) + { + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "read_MII busy timeout!!\n"); + return -1; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; + + writel(mii_control, mii_control_reg); + + timedout = 20; + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_read busy timeout!!\n"); + return -1; + } + } + return readl(mii_data_reg); + } + + static void au1000_mdio_write(struct net_device *dev, int phy_addr, + int reg, u16 value) + { + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_write busy timeout!!\n"); + return; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; + + writel(value, mii_data_reg); + writel(mii_control, mii_control_reg); + } + + static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) + { 
+ /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does + * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) + */ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return au1000_mdio_read(dev, phy_addr, regnum); + } + + static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) + { + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + au1000_mdio_write(dev, phy_addr, regnum, value); + return 0; + } + + static int au1000_mdiobus_reset(struct mii_bus *bus) + { + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return 0; + } + + static void au1000_hard_stop(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "hard stop\n"); + + reg = readl(&aup->mac->control); + reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); + } + + static void au1000_enable_rx_tx(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, hw, dev, "enable_rx_tx\n"); + + reg = readl(&aup->mac->control); + reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); + } + + static void + au1000_adjust_link(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct phy_device *phydev = aup->phy_dev; + unsigned long flags; + u32 reg; + + int status_change = 0; + + BUG_ON(!aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (phydev->link && (aup->old_speed != phydev->speed)) { + /* speed changed */ + + switch (phydev->speed) { + case SPEED_10: + case SPEED_100: + break; + default: + netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", + phydev->speed); + break; + } + + aup->old_speed = phydev->speed; + + status_change = 1; + } + + if (phydev->link && (aup->old_duplex != phydev->duplex)) { + /* duplex mode changed */ + + /* switching duplex mode requires to disable rx and tx! */ + au1000_hard_stop(dev); + + reg = readl(&aup->mac->control); + if (DUPLEX_FULL == phydev->duplex) { + reg |= MAC_FULL_DUPLEX; + reg &= ~MAC_DISABLE_RX_OWN; + } else { + reg &= ~MAC_FULL_DUPLEX; + reg |= MAC_DISABLE_RX_OWN; + } + writel(reg, &aup->mac->control); + au_sync_delay(1); + + au1000_enable_rx_tx(dev); + aup->old_duplex = phydev->duplex; + + status_change = 1; + } + + if (phydev->link != aup->old_link) { + /* link state changed */ + + if (!phydev->link) { + /* link went down */ + aup->old_speed = 0; + aup->old_duplex = -1; + } + + aup->old_link = phydev->link; + status_change = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + + if (status_change) { + if (phydev->link) + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); + else + netdev_info(dev, "link down\n"); + } + } + + static int au1000_mii_probe(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + if (aup->phy_static_config) { + BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); + + if (aup->phy_addr) + phydev = aup->mii_bus->phy_map[aup->phy_addr]; + else + netdev_info(dev, "using PHY-less setup\n"); + return 0; + } + + /* find the first (lowest address) PHY + * on the current MAC's MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) + if (aup->mii_bus->phy_map[phy_addr]) { + phydev = aup->mii_bus->phy_map[phy_addr]; + if (!aup->phy_search_highest_addr) + /* break out with first one found */ + break; + } + + if (aup->phy1_search_mac0) { + /* try harder to find a PHY */ + if (!phydev && (aup->mac_id == 1)) { + /* no PHY found, maybe we have a dual PHY? */ + dev_info(&dev->dev, ": no PHY found on MAC1, " + "let's see if it's attached to MAC0...\n"); + + /* find the first (lowest address) non-attached + * PHY on the MAC0 MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + struct phy_device *const tmp_phydev = + aup->mii_bus->phy_map[phy_addr]; + + if (aup->mac_id == 1) + break; + + /* no PHY here... */ + if (!tmp_phydev) + continue; + + /* already claimed by MAC0 */ + if (tmp_phydev->attached_dev) + continue; + + phydev = tmp_phydev; + break; /* found it */ + } + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -1; + } + + /* now we are supposed to have a proper phydev, to attach to... */ + BUG_ON(phydev->attached_dev); + + phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, + 0, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + aup->old_link = 0; + aup->old_speed = 0; + aup->old_duplex = -1; + aup->phy_dev = phydev; + + netdev_info(dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; + } + + + /* + * Buffer allocation/deallocation routines. The buffer descriptor returned + * has the virtual and dma address of a buffer suitable for + * both, receive and transmit operations. 
+ */ + static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) + { + struct db_dest *pDB; + pDB = aup->pDBfree; + + if (pDB) + aup->pDBfree = pDB->pnext; + + return pDB; + } + + void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) + { + struct db_dest *pDBfree = aup->pDBfree; + if (pDBfree) + pDBfree->pnext = pDB; + aup->pDBfree = pDB; + } + + static void au1000_reset_mac_unlocked(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + int i; + + au1000_hard_stop(dev); + + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel(0, aup->enable); + au_sync_delay(2); + + aup->tx_full = 0; + for (i = 0; i < NUM_RX_DMA; i++) { + /* reset control bits */ + aup->rx_dma_ring[i]->buff_stat &= ~0xf; + } + for (i = 0; i < NUM_TX_DMA; i++) { + /* reset control bits */ + aup->tx_dma_ring[i]->buff_stat &= ~0xf; + } + + aup->mac_enabled = 0; + + } + + static void au1000_reset_mac(struct net_device *dev) + { + struct au1000_private *const aup = netdev_priv(dev); + unsigned long flags; + + netif_dbg(aup, hw, dev, "reset mac, aup %x\n", + (unsigned)aup); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + spin_unlock_irqrestore(&aup->lock, flags); + } + + /* + * Setup the receive and transmit "rings". These pointers are the addresses + * of the rx and tx MAC DMA registers so they are fixed by the hardware -- + * these are not descriptors sitting in memory. + */ + static void -au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) ++au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) + { + int i; + + for (i = 0; i < NUM_RX_DMA; i++) { - aup->rx_dma_ring[i] = - (struct rx_dma *) - (rx_base + sizeof(struct rx_dma)*i); ++ aup->rx_dma_ring[i] = (struct rx_dma *) ++ (tx_base + 0x100 + sizeof(struct rx_dma) * i); + } + for (i = 0; i < NUM_TX_DMA; i++) { - aup->tx_dma_ring[i] = - (struct tx_dma *) - (tx_base + sizeof(struct tx_dma)*i); ++ aup->tx_dma_ring[i] = (struct tx_dma *) ++ (tx_base + sizeof(struct tx_dma) * i); + } + } + + /* + * ethtool operations + */ + + static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (aup->phy_dev) + return phy_ethtool_gset(aup->phy_dev, cmd); + + return -EINVAL; + } + + static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (aup->phy_dev) + return phy_ethtool_sset(aup->phy_dev, cmd); + + return -EINVAL; + } + + static void + au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + struct au1000_private *aup = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id); + info->regdump_len = 0; + } + + static void au1000_set_msglevel(struct net_device *dev, u32 value) + { + struct au1000_private *aup = netdev_priv(dev); + aup->msg_enable = value; + } + + static u32 au1000_get_msglevel(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + return aup->msg_enable; + } + + static const struct ethtool_ops au1000_ethtool_ops = { + .get_settings = au1000_get_settings, + .set_settings = au1000_set_settings, + .get_drvinfo = au1000_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = au1000_get_msglevel, + .set_msglevel = au1000_set_msglevel, + }; + + + 
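As the au1000_setup_hw_rings() change above shows, the "rings" here are not in-memory descriptors but fixed-offset register windows inside the MAC DMA block, and the rewrite derives both rings from a single ioremapped base, with the rx registers starting 0x100 bytes past the tx registers. The 0x100 offset and ring counts are taken from the hunk; everything else in the sketch below (field layouts, names) is illustrative.

        #include <linux/io.h>
        #include <linux/types.h>

        #define NUM_TX_DMA 4
        #define NUM_RX_DMA 4

        struct tx_dma { u32 buff_stat; u32 len; };  /* illustrative layout */
        struct rx_dma { u32 buff_stat; };           /* illustrative layout */

        struct ring_ptrs {
                struct tx_dma __iomem *tx[NUM_TX_DMA];
                struct rx_dma __iomem *rx[NUM_RX_DMA];
        };

        /* Carve both register rings out of one ioremapped MACDMA base. */
        static void setup_rings(struct ring_ptrs *r, void __iomem *macdma_base)
        {
                int i;

                for (i = 0; i < NUM_TX_DMA; i++)
                        r->tx[i] = macdma_base + i * sizeof(struct tx_dma);
                for (i = 0; i < NUM_RX_DMA; i++)
                        r->rx[i] = macdma_base + 0x100 + i * sizeof(struct rx_dma);
        }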
/* + * Initialize the interface. + * + * When the device powers up, the clocks are disabled and the + * mac is in reset state. When the interface is closed, we + * do the same -- reset the device and disable the clocks to + * conserve power. Thus, whenever au1000_init() is called, + * the device should already be in reset state. + */ + static int au1000_init(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + unsigned long flags; + int i; + u32 control; + + netif_dbg(aup, hw, dev, "au1000_init\n"); + + /* bring the device out of reset */ + au1000_enable_mac(dev, 1); + + spin_lock_irqsave(&aup->lock, flags); + + writel(0, &aup->mac->control); + aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; + aup->tx_tail = aup->tx_head; + aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; + + writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], + &aup->mac->mac_addr_high); + writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | + dev->dev_addr[1]<<8 | dev->dev_addr[0], + &aup->mac->mac_addr_low); + + + for (i = 0; i < NUM_RX_DMA; i++) + aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; + + au_sync(); + + control = MAC_RX_ENABLE | MAC_TX_ENABLE; + #ifndef CONFIG_CPU_LITTLE_ENDIAN + control |= MAC_BIG_ENDIAN; + #endif + if (aup->phy_dev) { + if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) + control |= MAC_FULL_DUPLEX; + else + control |= MAC_DISABLE_RX_OWN; + } else { /* PHY-less op, assume full-duplex */ + control |= MAC_FULL_DUPLEX; + } + + writel(control, &aup->mac->control); + writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ + au_sync(); + + spin_unlock_irqrestore(&aup->lock, flags); + return 0; + } + + static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) + { + struct net_device_stats *ps = &dev->stats; + + ps->rx_packets++; + if (status & RX_MCAST_FRAME) + ps->multicast++; + + if (status & RX_ERROR) { + ps->rx_errors++; + if (status & RX_MISSED_FRAME) + ps->rx_missed_errors++; + if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) + ps->rx_length_errors++; + if (status & RX_CRC_ERROR) + ps->rx_crc_errors++; + if (status & RX_COLL) + ps->collisions++; + } else + ps->rx_bytes += status & RX_FRAME_LEN_MASK; + + } + + /* + * Au1000 receive routine. 
+ */ + static int au1000_rx(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct sk_buff *skb; + struct rx_dma *prxd; + u32 buff_stat, status; + struct db_dest *pDB; + u32 frmlen; + + netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); + + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + while (buff_stat & RX_T_DONE) { + status = prxd->status; + pDB = aup->rx_db_inuse[aup->rx_head]; + au1000_update_rx_stats(dev, status); + if (!(status & RX_ERROR)) { + + /* good frame */ + frmlen = (status & RX_FRAME_LEN_MASK); + frmlen -= 4; /* Remove FCS */ + skb = dev_alloc_skb(frmlen + 2); + if (skb == NULL) { + netdev_err(dev, "Memory squeeze, dropping packet.\n"); + dev->stats.rx_dropped++; + continue; + } + skb_reserve(skb, 2); /* 16 byte IP header align */ + skb_copy_to_linear_data(skb, + (unsigned char *)pDB->vaddr, frmlen); + skb_put(skb, frmlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); /* pass the packet to upper layers */ + } else { + if (au1000_debug > 4) { + pr_err("rx_error(s):"); + if (status & RX_MISSED_FRAME) + pr_cont(" miss"); + if (status & RX_WDOG_TIMER) + pr_cont(" wdog"); + if (status & RX_RUNT) + pr_cont(" runt"); + if (status & RX_OVERLEN) + pr_cont(" overlen"); + if (status & RX_COLL) + pr_cont(" coll"); + if (status & RX_MII_ERROR) + pr_cont(" mii error"); + if (status & RX_CRC_ERROR) + pr_cont(" crc error"); + if (status & RX_LEN_ERROR) + pr_cont(" len error"); + if (status & RX_U_CNTRL_FRAME) + pr_cont(" u control frame"); + pr_cont("\n"); + } + } + prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); + au_sync(); + + /* next descriptor */ + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + } + return 0; + } + + static void au1000_update_tx_stats(struct net_device *dev, u32 status) + { + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + + if (status & TX_FRAME_ABORTED) { + if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { + /* any other tx errors are only valid + * in half duplex mode + */ + ps->tx_errors++; + ps->tx_aborted_errors++; + } + } else { + ps->tx_errors++; + ps->tx_aborted_errors++; + if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) + ps->tx_carrier_errors++; + } + } + } + + /* + * Called from the interrupt service routine to acknowledge + * the TX DONE bits. This is a must if the irq is setup as + * edge triggered. + */ + static void au1000_tx_ack(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct tx_dma *ptxd; + + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + while (ptxd->buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->buff_stat &= ~TX_T_DONE; + ptxd->len = 0; + au_sync(); + + aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + } + } + + /* + * Au1000 interrupt service routine. 
+ */ + static irqreturn_t au1000_interrupt(int irq, void *dev_id) + { + struct net_device *dev = dev_id; + + /* Handle RX interrupts first to minimize chance of overrun */ + + au1000_rx(dev); + au1000_tx_ack(dev); + return IRQ_RETVAL(1); + } + + static int au1000_open(struct net_device *dev) + { + int retval; + struct au1000_private *aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); + + retval = request_irq(dev->irq, au1000_interrupt, 0, + dev->name, dev); + if (retval) { + netdev_err(dev, "unable to get IRQ %d\n", dev->irq); + return retval; + } + + retval = au1000_init(dev); + if (retval) { + netdev_err(dev, "error in au1000_init\n"); + free_irq(dev->irq, dev); + return retval; + } + + if (aup->phy_dev) { + /* cause the PHY state machine to schedule a link state check */ + aup->phy_dev->state = PHY_CHANGELINK; + phy_start(aup->phy_dev); + } + + netif_start_queue(dev); + + netif_dbg(aup, drv, dev, "open: Initialization done.\n"); + + return 0; + } + + static int au1000_close(struct net_device *dev) + { + unsigned long flags; + struct au1000_private *const aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); + + if (aup->phy_dev) + phy_stop(aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + /* stop the device */ + netif_stop_queue(dev); + + /* disable the interrupt */ + free_irq(dev->irq, dev); + spin_unlock_irqrestore(&aup->lock, flags); + + return 0; + } + + /* + * Au1000 transmit routine. + */ + static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + struct tx_dma *ptxd; + u32 buff_stat; + struct db_dest *pDB; + int i; + + netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", + (unsigned)aup, skb->len, + skb->data, aup->tx_head); + + ptxd = aup->tx_dma_ring[aup->tx_head]; + buff_stat = ptxd->buff_stat; + if (buff_stat & TX_DMA_ENABLE) { + /* We've wrapped around and the transmitter is still busy */ + netif_stop_queue(dev); + aup->tx_full = 1; + return NETDEV_TX_BUSY; + } else if (buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->len = 0; + } + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + + pDB = aup->tx_db_inuse[aup->tx_head]; + skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); + if (skb->len < ETH_ZLEN) { + for (i = skb->len; i < ETH_ZLEN; i++) + ((char *)pDB->vaddr)[i] = 0; + + ptxd->len = ETH_ZLEN; + } else + ptxd->len = skb->len; + + ps->tx_packets++; + ps->tx_bytes += ptxd->len; + + ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + au_sync(); + dev_kfree_skb(skb); + aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); + return NETDEV_TX_OK; + } + + /* + * The Tx ring has been full longer than the watchdog timeout + * value. The transmitter must be hung? + */ + static void au1000_tx_timeout(struct net_device *dev) + { + netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); + au1000_reset_mac(dev); + au1000_init(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + } + + static void au1000_multicast_list(struct net_device *dev) + { + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); + reg = readl(&aup->mac->control); + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ + reg |= MAC_PROMISCUOUS; + } else if ((dev->flags & IFF_ALLMULTI) || + netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { + reg |= MAC_PASS_ALL_MULTI; + reg &= ~MAC_PROMISCUOUS; + netdev_info(dev, "Pass all multicast\n"); + } else { + struct netdev_hw_addr *ha; + u32 mc_filter[2]; /* Multicast hash filter */ + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, + (long *)mc_filter); + writel(mc_filter[1], &aup->mac->multi_hash_high); + writel(mc_filter[0], &aup->mac->multi_hash_low); + reg &= ~MAC_PROMISCUOUS; + reg |= MAC_HASH_MODE; + } + writel(reg, &aup->mac->control); + } + + static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { + struct au1000_private *aup = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!aup->phy_dev) + return -EINVAL; /* PHY not controllable */ + + return phy_mii_ioctl(aup->phy_dev, rq, cmd); + } + + static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_rx_mode = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + }; + + static int __devinit au1000_probe(struct platform_device *pdev) + { + static unsigned version_printed; + struct au1000_private *aup = NULL; + struct au1000_eth_platform_data *pd; + struct net_device *dev = NULL; + struct db_dest *pDB, *pDBfree; + int irq, i, err = 0; - struct resource *base, *macen; ++ struct resource *base, *macen, *macdma; + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!base) { + dev_err(&pdev->dev, "failed to retrieve base register\n"); + err = -ENODEV; + goto out; + } + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!macen) { + dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); + err = -ENODEV; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to retrieve IRQ\n"); + err = -ENODEV; + goto out; + } + ++ macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (!macdma) { ++ dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n"); ++ err = -ENODEV; ++ goto out; ++ } ++ + if (!request_mem_region(base->start, resource_size(base), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for base registers\n"); + err = -ENXIO; + goto out; + } + + if (!request_mem_region(macen->start, resource_size(macen), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); + err = -ENXIO; + goto err_request; + } + ++ if (!request_mem_region(macdma->start, resource_size(macdma), ++ pdev->name)) { ++ dev_err(&pdev->dev, "failed to request MACDMA memory region\n"); ++ err = -ENXIO; ++ goto err_macdma; ++ } ++ + dev = alloc_etherdev(sizeof(struct au1000_private)); + if (!dev) { + dev_err(&pdev->dev, "alloc_etherdev failed\n"); + err = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + aup = netdev_priv(dev); + + spin_lock_init(&aup->lock); + aup->msg_enable = (au1000_debug < 4 ? 
+ AU1000_DEF_MSG_ENABLE : au1000_debug); + + /* Allocate the data buffers + * Snooping works fine with eth on all au1xxx + */ + aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); + if (!aup->vaddr) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto err_vaddr; + } + + /* aup->mac is the base address of the MAC's registers */ + aup->mac = (struct mac_reg *) + ioremap_nocache(base->start, resource_size(base)); + if (!aup->mac) { + dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); + err = -ENXIO; + goto err_remap1; + } + + /* Setup some variables for quick register address access */ + aup->enable = (u32 *)ioremap_nocache(macen->start, + resource_size(macen)); + if (!aup->enable) { + dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); + err = -ENXIO; + goto err_remap2; + } + aup->mac_id = pdev->id; + - if (pdev->id == 0) - au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); - else if (pdev->id == 1) - au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); ++ aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); ++ if (!aup->macdma) { ++ dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); ++ err = -ENXIO; ++ goto err_remap3; ++ } ++ ++ au1000_setup_hw_rings(aup, aup->macdma); + + /* set a random MAC now in case platform_data doesn't provide one */ + random_ether_addr(dev->dev_addr); + + writel(0, aup->enable); + aup->mac_enabled = 0; + + pd = pdev->dev.platform_data; + if (!pd) { + dev_info(&pdev->dev, "no platform_data passed," + " PHY search on MAC0\n"); + aup->phy1_search_mac0 = 1; + } else { + if (is_valid_ether_addr(pd->mac)) + memcpy(dev->dev_addr, pd->mac, 6); + + aup->phy_static_config = pd->phy_static_config; + aup->phy_search_highest_addr = pd->phy_search_highest_addr; + aup->phy1_search_mac0 = pd->phy1_search_mac0; + aup->phy_addr = pd->phy_addr; + aup->phy_busid = pd->phy_busid; + aup->phy_irq = pd->phy_irq; + } + + if (aup->phy_busid && aup->phy_busid > 0) { + dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); + err = -ENODEV; + goto err_mdiobus_alloc; + } + + aup->mii_bus = mdiobus_alloc(); + if (aup->mii_bus == NULL) { + dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); + err = -ENOMEM; + goto err_mdiobus_alloc; + } + + aup->mii_bus->priv = dev; + aup->mii_bus->read = au1000_mdiobus_read; + aup->mii_bus->write = au1000_mdiobus_write; + aup->mii_bus->reset = au1000_mdiobus_reset; + aup->mii_bus->name = "au1000_eth_mii"; + snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); + aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (aup->mii_bus->irq == NULL) + goto err_out; + + for (i = 0; i < PHY_MAX_ADDR; ++i) + aup->mii_bus->irq[i] = PHY_POLL; + /* if known, set corresponding PHY IRQs */ + if (aup->phy_static_config) + if (aup->phy_irq && aup->phy_busid == aup->mac_id) + aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; + + err = mdiobus_register(aup->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MDIO bus\n"); + goto err_mdiobus_reg; + } + + if (au1000_mii_probe(dev) != 0) + goto err_out; + + pDBfree = NULL; + /* setup the data buffer descriptors and attach a buffer to each one */ + pDB = aup->db; + for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { + pDB->pnext = pDBfree; + pDBfree = pDB; + pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); + pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB++; + } + 
aup->pDBfree = pDBfree; + + for (i = 0; i < NUM_RX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_db_inuse[i] = pDB; + } + for (i = 0; i < NUM_TX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->len = 0; + aup->tx_db_inuse[i] = pDB; + } + + dev->base_addr = base->start; + dev->irq = irq; + dev->netdev_ops = &au1000_netdev_ops; + SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); + dev->watchdog_timeo = ETH_TX_TIMEOUT; + + /* + * The boot code uses the ethernet controller, so reset it to start + * fresh. au1000_init() expects that the device is in reset state. + */ + au1000_reset_mac(dev); + + err = register_netdev(dev); + if (err) { + netdev_err(dev, "Cannot register net device, aborting.\n"); + goto err_out; + } + + netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", + (unsigned long)base->start, irq); + if (version_printed++ == 0) + pr_info("%s version %s %s\n", + DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + return 0; + + err_out: + if (aup->mii_bus != NULL) + mdiobus_unregister(aup->mii_bus); + + /* here we should have a valid dev plus aup-> register addresses + * so we can reset the mac properly. + */ + au1000_reset_mac(dev); + + for (i = 0; i < NUM_RX_DMA; i++) { + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + } + for (i = 0; i < NUM_TX_DMA; i++) { + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + } + err_mdiobus_reg: + mdiobus_free(aup->mii_bus); + err_mdiobus_alloc: ++ iounmap(aup->macdma); ++err_remap3: + iounmap(aup->enable); + err_remap2: + iounmap(aup->mac); + err_remap1: + dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + err_vaddr: + free_netdev(dev); + err_alloc: ++ release_mem_region(macdma->start, resource_size(macdma)); ++err_macdma: + release_mem_region(macen->start, resource_size(macen)); + err_request: + release_mem_region(base->start, resource_size(base)); + out: + return err; + } + + static int __devexit au1000_remove(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); + struct au1000_private *aup = netdev_priv(dev); + int i; + struct resource *base, *macen; + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(dev); + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + + for (i = 0; i < NUM_RX_DMA; i++) + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + + for (i = 0; i < NUM_TX_DMA; i++) + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + + dma_free_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + ++ iounmap(aup->macdma); + iounmap(aup->mac); + iounmap(aup->enable); + ++ base = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ release_mem_region(base->start, resource_size(base)); ++ + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(base->start, resource_size(base)); + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(macen->start, resource_size(macen)); + + free_netdev(dev); + + return 0; + } + + static struct platform_driver au1000_eth_driver = { + .probe = au1000_probe, + .remove = __devexit_p(au1000_remove), + .driver = { + .name = "au1000-eth", + .owner = THIS_MODULE, + }, + }; + MODULE_ALIAS("platform:au1000-eth"); + + + static int __init 
au1000_init_module(void) + { + return platform_driver_register(&au1000_eth_driver); + } + + static void __exit au1000_exit_module(void) + { + platform_driver_unregister(&au1000_eth_driver); + } + + module_init(au1000_init_module); + module_exit(au1000_exit_module); diff --combined drivers/net/ethernet/amd/au1000_eth.h index 4b7f7ad,6229c77..4b7f7ad --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@@ -124,7 -124,7 +124,7 @@@ struct au1000_private */ struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ - + void __iomem *macdma; /* base of MAC DMA port */ u32 vaddr; /* virtual address of rx/tx buffers */ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
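The header hunk above adds the macdma mapping that the probe and remove hunks acquire and release as a third platform MEM resource. For reference, here is a compact, hedged sketch of that acquire/undo ordering; the helper name and error handling are generic, not the driver's, and only kernel APIs that the probe hunk itself already uses appear here.

        #include <linux/io.h>
        #include <linux/ioport.h>
        #include <linux/platform_device.h>

        /* Sketch: claim a third MEM resource, map it, and release the region
         * again if the mapping fails, mirroring the probe's unwind order. */
        static void __iomem *map_third_mem(struct platform_device *pdev)
        {
                struct resource *res;
                void __iomem *base;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
                if (!res)
                        return NULL;

                if (!request_mem_region(res->start, resource_size(res),
                                        pdev->name))
                        return NULL;

                base = ioremap(res->start, resource_size(res));
                if (!base)
                        release_mem_region(res->start, resource_size(res));

                return base;
        }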
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0000000,2f92487..627a580 mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@@ -1,0 -1,2069 +1,2075 @@@ + /* bnx2x.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein eilong@broadcom.com + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + + #ifndef BNX2X_H + #define BNX2X_H + #include <linux/netdevice.h> + #include <linux/dma-mapping.h> + #include <linux/types.h> + + /* compilation time flags */ + + /* define this to make the driver freeze on error to allow getting debug info + * (you will need to reboot afterwards) */ + /* #define BNX2X_STOP_ON_ERROR */ + + #define DRV_MODULE_VERSION "1.70.00-0" + #define DRV_MODULE_RELDATE "2011/06/13" + #define BNX2X_BC_VER 0x040200 + + #if defined(CONFIG_DCB) + #define BCM_DCBNL + #endif + #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) + #define BCM_CNIC 1 + #include "../cnic_if.h" + #endif + + #ifdef BCM_CNIC + #define BNX2X_MIN_MSIX_VEC_CNT 3 + #define BNX2X_MSIX_VEC_FP_START 2 + #else + #define BNX2X_MIN_MSIX_VEC_CNT 2 + #define BNX2X_MSIX_VEC_FP_START 1 + #endif + + #include <linux/mdio.h> + + #include "bnx2x_reg.h" + #include "bnx2x_fw_defs.h" + #include "bnx2x_hsi.h" + #include "bnx2x_link.h" + #include "bnx2x_sp.h" + #include "bnx2x_dcb.h" + #include "bnx2x_stats.h" + + /* error/debug prints */ + + #define DRV_MODULE_NAME "bnx2x" + + /* for messages that are currently off */ + #define BNX2X_MSG_OFF 0 + #define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ + #define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ + #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ + #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ + + /* regular debug print */ + #define DP(__mask, fmt, ...) \ + do { \ + if (bp->msg_enable & (__mask)) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + #define DP_CONT(__mask, fmt, ...) \ + do { \ + if (bp->msg_enable & (__mask)) \ + pr_cont(fmt, ##__VA_ARGS__); \ + } while (0) + + /* errors debug print */ + #define BNX2X_DBG_ERR(fmt, ...) \ + do { \ + if (netif_msg_probe(bp)) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + /* for errors (never masked) */ + #define BNX2X_ERR(fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ + } while (0) + + #define BNX2X_ERROR(fmt, ...) \ + pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) + + + /* before we have a dev->name use dev_info() */ + #define BNX2X_DEV_INFO(fmt, ...) 
\ + do { \ + if (netif_msg_probe(bp)) \ + dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ + } while (0) + + #ifdef BNX2X_STOP_ON_ERROR + void bnx2x_int_disable(struct bnx2x *bp); + #define bnx2x_panic() \ + do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_int_disable(bp); \ + bnx2x_panic_dump(bp); \ + } while (0) + #else + #define bnx2x_panic() \ + do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp); \ + } while (0) + #endif + + #define bnx2x_mc_addr(ha) ((ha)->addr) + #define bnx2x_uc_addr(ha) ((ha)->addr) + + #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) + #define U64_HI(x) (u32)(((u64)(x)) >> 32) + #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + + + #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) + + #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) + #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) + #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) + + #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) + #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) + #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) + + #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) + #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) + + #define REG_RD_DMAE(bp, offset, valp, len32) \ + do { \ + bnx2x_read_dmae(bp, offset, len32);\ + memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ + } while (0) + + #define REG_WR_DMAE(bp, offset, valp, len32) \ + do { \ + memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ + offset, len32); \ + } while (0) + + #define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ + REG_WR_DMAE(bp, offset, valp, len32) + + #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ + do { \ + memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ + bnx2x_write_big_buf_wb(bp, addr, len32); \ + } while (0) + + #define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ + offsetof(struct shmem_region, field)) + #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) + #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) + + #define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ + offsetof(struct shmem2_region, field)) + #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) + #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) + #define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ + offsetof(struct mf_cfg, field)) + #define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ + offsetof(struct mf2_cfg, field)) + + #define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) + #define MF_CFG_WR(bp, field, val) REG_WR(bp,\ + MF_CFG_ADDR(bp, field), (val)) + #define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) + + #define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ + (SHMEM2_RD((bp), size) > \ + offsetof(struct shmem2_region, field))) + + #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) + #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) + + /* SP SB indices */ + + /* General SP events - stats query, cfc delete, etc */ + #define HC_SP_INDEX_ETH_DEF_CONS 3 + + /* EQ completions */ + #define HC_SP_INDEX_EQ_CONS 7 + + /* FCoE L2 connection completions */ + #define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 + #define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 + /* iSCSI L2 */ + #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 + #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + + /* Special clients 
parameters */ + + /* SB indices */ + /* FCoE L2 */ + #define BNX2X_FCOE_L2_RX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) + + #define BNX2X_FCOE_L2_TX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) + + /** + * CIDs and CLIDs: + * CLIDs below is a CLID for func 0, then the CLID for other + * functions will be calculated by the formula: + * + * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X + * + */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 -#define BNX2X_ISCSI_ETH_CID 49 - -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX 2 -#define BNX2X_FCOE_ETH_CID 50 ++enum { ++ BNX2X_ISCSI_ETH_CL_ID_IDX, ++ BNX2X_FCOE_ETH_CL_ID_IDX, ++ BNX2X_MAX_CNIC_ETH_CL_ID_IDX, ++}; ++ ++#define BNX2X_CNIC_START_ETH_CID 48 ++enum { ++ /* iSCSI L2 */ ++ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, ++ /* FCoE L2 */ ++ BNX2X_FCOE_ETH_CID, ++}; + + /** Additional rings budgeting */ + #ifdef BCM_CNIC + #define CNIC_PRESENT 1 + #define FCOE_PRESENT 1 + #else + #define CNIC_PRESENT 0 + #define FCOE_PRESENT 0 + #endif /* BCM_CNIC */ + #define NON_ETH_CONTEXT_USE (FCOE_PRESENT) + + #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + + #define SM_RX_ID 0 + #define SM_TX_ID 1 + + /* defines for multiple tx priority indices */ + #define FIRST_TX_ONLY_COS_INDEX 1 + #define FIRST_TX_COS_INDEX 0 + + /* defines for decodeing the fastpath index and the cos index out of the + * transmission queue index + */ + #define MAX_TXQS_PER_COS FP_SB_MAX_E1x + + #define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) + #define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + + /* rules for calculating the cids of tx-only connections */ + #define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) + #define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) + + /* fp index inside class of service range */ + #define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + + /* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 If applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) + */ + #define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) + #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) + + /* fast path */ + struct sw_rx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + }; + + struct sw_tx_bd { + struct sk_buff *skb; + u16 first_bd; + u8 flags; + /* Set on the first BD descriptor when there is a split BD */ + #define BNX2X_TSO_SPLIT_BD (1<<0) + }; + + struct sw_rx_page { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); + }; + + union db_prod { + struct doorbell_set_prod data; + u32 raw; + }; + + /* dropless fc FW/HW related params */ + #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) + #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? 
\ + ETH_MAX_AGGREGATION_QUEUES_E1 :\ + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) + #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) + #define FW_PREFETCH_CNT 16 + #define DROPLESS_FC_HEADROOM 100 + + /* MC hsi */ + #define BCM_PAGE_SHIFT 12 + #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) + #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) + #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) + + #define PAGES_PER_SGE_SHIFT 0 + #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) + #define SGE_PAGE_SIZE PAGE_SIZE + #define SGE_PAGE_SHIFT PAGE_SHIFT + #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) + + /* SGE ring related macros */ + #define NUM_RX_SGE_PAGES 2 + #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) + #define NEXT_PAGE_SGE_DESC_CNT 2 + #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) + /* RX_SGE_CNT is promised to be a power of 2 */ + #define RX_SGE_MASK (RX_SGE_CNT - 1) + #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) + #define MAX_RX_SGE (NUM_RX_SGE - 1) + #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ + (MAX_RX_SGE_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ + (x) + 1) + #define RX_SGE(x) ((x) & MAX_RX_SGE) + + /* + * Number of required SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + * these aggregations will probably consume SGE immidiatelly) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + * after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ + #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) + #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ + MAX_RX_SGE_CNT) + #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) + #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + /* Manipulate a bit vector defined as an array of u64 */ + + /* Number of bits in one sge_mask array element */ + #define BIT_VEC64_ELEM_SZ 64 + #define BIT_VEC64_ELEM_SHIFT 6 + #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) + + + #define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((u64)0x1 << (bit))); \ + } while (0) + + #define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((u64)0x1 << (bit)))); \ + } while (0) + + + #define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + + #define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + + #define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + + /* Creates a bitmask of all ones in less significant bits. 
+ idx - index of the most significant bit in the created mask */ + #define BIT_VEC64_ONES_MASK(idx) \ + (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) + #define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) + + /*******************************************************/ + + + + /* Number of u64 elements in SGE mask array */ + #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ + BIT_VEC64_ELEM_SZ) + #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) + #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) + + union host_hc_status_block { + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; + }; + + struct bnx2x_agg_info { + /* + * First aggregation buffer is an skb, the following - are pages. + * We will preallocate the skbs for each aggregation when + * we open the interface and will replace the BD at the consumer + * with this one when we receive the TPA_START CQE in order to + * keep the Rx BD ring consistent. + */ + struct sw_rx_bd first_buf; + u8 tpa_state; + #define BNX2X_TPA_START 1 + #define BNX2X_TPA_STOP 2 + #define BNX2X_TPA_ERROR 3 + u8 placement_offset; + u16 parsing_flags; + u16 vlan_tag; + u16 len_on_bd; + }; + + #define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + + struct bnx2x_fp_txdata { + + struct sw_tx_bd *tx_buf_ring; + + union eth_tx_bd_types *tx_desc_ring; + dma_addr_t tx_desc_mapping; + + u32 cid; + + union db_prod tx_db; + + u16 tx_pkt_prod; + u16 tx_pkt_cons; + u16 tx_bd_prod; + u16 tx_bd_cons; + + unsigned long tx_pkt; + + __le16 *tx_cons_sb; + + int txq_index; + }; + + struct bnx2x_fastpath { + struct bnx2x *bp; /* parent */ + + #define BNX2X_NAPI_WEIGHT 128 + struct napi_struct napi; + union host_hc_status_block status_blk; + /* chip independed shortcuts into sb structure */ + __le16 *sb_index_values; + __le16 *sb_running_index; + /* chip independed shortcut into rx_prods_offset memory */ + u32 ustorm_rx_prods_offset; + + u32 rx_buf_size; + + dma_addr_t status_blk_mapping; + + u8 max_cos; /* actual number of active tx coses */ + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; + + struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ + struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ + + struct eth_rx_bd *rx_desc_ring; + dma_addr_t rx_desc_mapping; + + union eth_rx_cqe *rx_comp_ring; + dma_addr_t rx_comp_mapping; + + /* SGE ring */ + struct eth_rx_sge *rx_sge_ring; + dma_addr_t rx_sge_mapping; + + u64 sge_mask[RX_SGE_MASK_LEN]; + + u32 cid; + + __le16 fp_hc_idx; + + u8 index; /* number in fp array */ + u8 cl_id; /* eth client id */ + u8 cl_qzone_id; + u8 fw_sb_id; /* status block number in FW */ + u8 igu_sb_id; /* status block number in HW */ + + u16 rx_bd_prod; + u16 rx_bd_cons; + u16 rx_comp_prod; + u16 rx_comp_cons; + u16 rx_sge_prod; + /* The last maximal completed SGE */ + u16 last_max_sge; + __le16 *rx_cons_sb; + unsigned long rx_pkt, + rx_calls; + + /* TPA related */ + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; + u8 disable_tpa; + #ifdef BNX2X_STOP_ON_ERROR + u64 tpa_queue_used; + #endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + + /* The size is calculated using the following: + sizeof name field from netdev structure + + 4 ('-Xx-' string) + + 4 (for the digits and to make it DWORD aligned) */ + #define FP_NAME_SIZE 
(sizeof(((struct net_device *)0)->name) + 8) + char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + + }; + + #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) + + /* Use 2500 as a mini-jumbo MTU for FCoE */ + #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + + /* FCoE L2 `fastpath' entry is right after the eth entries */ + #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) + #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) + #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) + #define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) + + + #define IS_ETH_FP(fp) (fp->index < \ + BNX2X_NUM_ETH_QUEUES(fp->bp)) + #ifdef BCM_CNIC + #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) + #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) + #else + #define IS_FCOE_FP(fp) false + #define IS_FCOE_IDX(idx) false + #endif + + + /* MC hsi */ + #define MAX_FETCH_BD 13 /* HW max BDs per packet */ + #define RX_COPY_THRESH 92 + + #define NUM_TX_RINGS 16 + #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) + #define NEXT_PAGE_TX_DESC_CNT 1 + #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) + #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) + #define MAX_TX_BD (NUM_TX_BD - 1) + #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) + #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ + (MAX_TX_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ + (x) + 1) + #define TX_BD(x) ((x) & MAX_TX_BD) + #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) + + /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ + #define NUM_RX_RINGS 8 + #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) + #define NEXT_PAGE_RX_DESC_CNT 2 + #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) + #define RX_DESC_MASK (RX_DESC_CNT - 1) + #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) + #define MAX_RX_BD (NUM_RX_BD - 1) + #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) + + /* dropless fc calculations for BDs + * + * Number of BDs should as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ + #define NUM_BD_REQ BRB_SIZE(bp) + #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ + MAX_RX_DESC_CNT) + #define BD_TH_LO(bp) (NUM_BD_REQ + \ + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ + FW_DROP_LEVEL(bp)) + #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) + + #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ + ETH_MIN_RX_CQES_WITH_TPA_E1 : \ + ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) + #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA + #define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) + #define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ + MIN_RX_AVAIL)) + + #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ + (MAX_RX_DESC_CNT - 1)) ? 
\ + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ + (x) + 1) + #define RX_BD(x) ((x) & MAX_RX_BD) + + /* + * As long as CQE is X times bigger than BD entry we have to allocate X times + * more pages for CQ ring in order to keep it balanced with BD ring + */ + #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) + #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) + #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) + #define NEXT_PAGE_RCQ_DESC_CNT 1 + #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) + #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) + #define MAX_RCQ_BD (NUM_RCQ_BD - 1) + #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) + #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ + (MAX_RCQ_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ + (x) + 1) + #define RCQ_BD(x) ((x) & MAX_RCQ_BD) + + /* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ + #define NUM_RCQ_REQ BRB_SIZE(bp) + #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ + MAX_RCQ_DESC_CNT) + #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ + FW_DROP_LEVEL(bp)) + #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + + /* This is needed for determining of last_max */ + #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) + #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) + + + #define BNX2X_SWCID_SHIFT 17 + #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + + /* used on a CID received from the HW */ + #define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) + #define CQE_CMD(x) (le32_to_cpu(x) >> \ + COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + + #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ + le32_to_cpu((bd)->addr_lo)) + #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + + #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ + #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ + #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) + #error "Min DB doorbell stride is 8" + #endif + #define DPM_TRIGER_TYPE 0x40 + #define DOORBELL(bp, cid, val) \ + do { \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ + DPM_TRIGER_TYPE); \ + } while (0) + + + /* TX CSUM helpers */ + #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ + skb->csum_offset) + #define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ + skb->csum_offset)) + + #define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) + + #define XMIT_PLAIN 0 + #define XMIT_CSUM_V4 0x1 + #define XMIT_CSUM_V6 0x2 + #define XMIT_CSUM_TCP 0x4 + #define XMIT_GSO_V4 0x8 + #define XMIT_GSO_V6 0x10 + + #define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) + #define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) + + + /* stuff added to make the code fit 80Col */ + #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) + #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) + #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) + #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) + #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + + #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG + + #define BNX2X_IP_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + 
ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) + + #define BNX2X_L4_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + + #define BNX2X_RX_CSUM_OK(cqe) \ + (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) + + #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ + (((le16_to_cpu(flags) & \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ + == PRS_FLAG_OVERETH_IPV4) + #define BNX2X_RX_SUM_FIX(cqe) \ + BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) + + + #define FP_USB_FUNC_OFF \ + offsetof(struct cstorm_status_block_u, func) + #define FP_CSB_FUNC_OFF \ + offsetof(struct cstorm_status_block_c, func) + + #define HC_INDEX_ETH_RX_CQ_CONS 1 + + #define HC_INDEX_OOO_TX_CQ_CONS 4 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 + + #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 + + #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + + #define BNX2X_RX_SB_INDEX \ + (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) + + #define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 + + #define BNX2X_TX_SB_INDEX_COS0 \ + (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) + + /* end of fast path */ + + /* common */ + + struct bnx2x_common { + + u32 chip_id; + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + #define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) + + #define CHIP_NUM(bp) (bp->common.chip_id >> 16) + #define CHIP_NUM_57710 0x164e + #define CHIP_NUM_57711 0x164f + #define CHIP_NUM_57711E 0x1650 + #define CHIP_NUM_57712 0x1662 + #define CHIP_NUM_57712_MF 0x1663 + #define CHIP_NUM_57713 0x1651 + #define CHIP_NUM_57713E 0x1652 + #define CHIP_NUM_57800 0x168a + #define CHIP_NUM_57800_MF 0x16a5 + #define CHIP_NUM_57810 0x168e + #define CHIP_NUM_57810_MF 0x16ae + #define CHIP_NUM_57840 0x168d + #define CHIP_NUM_57840_MF 0x16ab + #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) + #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) + #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) + #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) + #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) + #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) + #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) + #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) + #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) + #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) + #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) + #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ + CHIP_IS_57711E(bp)) + #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ + CHIP_IS_57712_MF(bp)) + #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ + CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57810(bp) || \ + CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57840(bp) || \ + CHIP_IS_57840_MF(bp)) + #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) + #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) + #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) + + #define CHIP_REV_SHIFT 12 + #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) + #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) + #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) + #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) + /* assume maximum 5 revisions */ + #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) + /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, 
E=>6 */ + #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + !(CHIP_REV_VAL(bp) & 0x00001000)) + /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ + #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + (CHIP_REV_VAL(bp) & 0x00001000)) + + #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ + ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) + + #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) + #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) + #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ + (CHIP_REV_SHIFT + 1)) \ + << CHIP_REV_SHIFT) + #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ + CHIP_REV_SIM(bp) :\ + CHIP_REV_VAL(bp)) + #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Bx)) + #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Ax)) + + int flash_size; + #define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ + #define BNX2X_NVRAM_TIMEOUT_COUNT 30000 + #define BNX2X_NVRAM_PAGE_SIZE 256 + + u32 shmem_base; + u32 shmem2_base; + u32 mf_cfg_base; + u32 mf2_cfg_base; + + u32 hw_config; + + u32 bc_ver; + + u8 int_block; + #define INT_BLOCK_HC 0 + #define INT_BLOCK_IGU 1 + #define INT_BLOCK_MODE_NORMAL 0 + #define INT_BLOCK_MODE_BW_COMP 2 + #define CHIP_INT_MODE_IS_NBC(bp) \ + (!CHIP_IS_E1x(bp) && \ + !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) + #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) + + u8 chip_port_mode; + #define CHIP_4_PORT_MODE 0x0 + #define CHIP_2_PORT_MODE 0x1 + #define CHIP_PORT_MODE_NONE 0x2 + #define CHIP_MODE(bp) (bp->common.chip_port_mode) + #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) + }; + + /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ + #define BNX2X_IGU_STAS_MSG_VF_CNT 64 + #define BNX2X_IGU_STAS_MSG_PF_CNT 4 + + /* end of common */ + + /* port */ + + struct bnx2x_port { + u32 pmf; + + u32 link_config[LINK_CONFIG_SIZE]; + + u32 supported[LINK_CONFIG_SIZE]; + /* link settings - missing defines */ + #define SUPPORTED_2500baseX_Full (1 << 15) + + u32 advertising[LINK_CONFIG_SIZE]; + /* link settings - missing defines */ + #define ADVERTISED_2500baseX_Full (1 << 15) + + u32 phy_addr; + + /* used to synchronize phy accesses */ + struct mutex phy_mutex; + int need_hw_lock; + + u32 port_stx; + + struct nig_stats old_nig_stats; + }; + + /* end of port */ + + #define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + + /* slow path */ + + /* slow path work-queue */ + extern struct workqueue_struct *bnx2x_wq; + + #define BNX2X_MAX_NUM_OF_VFS 64 + #define BNX2X_VF_ID_INVALID 0xFF + + /* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * control by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume FP-SB reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. 
The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. the cid for the FCoE L2 queue is always X. + */ + + /* fast-path interrupt contexts E1x */ + #define FP_SB_MAX_E1x 16 + /* fast-path interrupt contexts E2 */ + #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + + union cdu_context { + struct eth_context eth; + char pad[1024]; + }; + + /* CDU host DB constants */ + #define CDU_ILT_PAGE_SZ_HW 3 + #define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ + #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + + #ifdef BCM_CNIC + #define CNIC_ISCSI_CID_MAX 256 + #define CNIC_FCOE_CID_MAX 2048 + #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) + #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) + #endif + + #define QM_ILT_PAGE_SZ_HW 0 + #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ + #define QM_CID_ROUND 1024 + + #ifdef BCM_CNIC + /* TM (timers) host DB constants */ + #define TM_ILT_PAGE_SZ_HW 0 + #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ + /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ + #define TM_CONN_NUM 1024 + #define TM_ILT_SZ (8 * TM_CONN_NUM) + #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + + /* SRC (Searcher) host DB constants */ + #define SRC_ILT_PAGE_SZ_HW 0 + #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ + #define SRC_HASH_BITS 10 + #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ + #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) + #define SRC_T2_SZ SRC_ILT_SZ + #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + + #endif + + #define MAX_DMAE_C 8 + + /* DMA memory not used in fastpath */ + struct bnx2x_slowpath { + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + /* Queue State related ramrods are always sent under rtnl_lock */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + union { + struct function_start_data func_start; + /* pfc configuration for DCBX ramrod */ + struct flow_control_configuration pfc_config; + } func_rdata; + + /* used by dmae command executer */ + struct dmae_command dmae[MAX_DMAE_C]; + + u32 stats_comp; + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + struct host_func_stats func_stats_base; + + u32 wb_comp; + u32 wb_data[4]; + }; + + #define bnx2x_sp(bp, var) (&bp->slowpath->var) + #define bnx2x_sp_mapping(bp, var) \ + (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) + + + /* attn group wiring */ + #define MAX_DYNAMIC_ATTN_GRPS 8 + + struct attn_route { + u32 sig[5]; + }; + + struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; + }; + + struct hw_context { + union cdu_context *vcxt; + dma_addr_t cxt_mapping; + size_t size; + }; + + /* forward */ + struct bnx2x_ilt; + + + enum bnx2x_recovery_state { + BNX2X_RECOVERY_DONE, + BNX2X_RECOVERY_INIT, + BNX2X_RECOVERY_WAIT, + BNX2X_RECOVERY_FAILED + 
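To put the CDU ILT sizing above in concrete numbers: an ILT page of (8192 << 3) = 64 KiB holds 64 of the 1024-byte cdu_context entries, so the 256 iSCSI plus 2048 FCoE CIDs reserved for CNIC come out to DIV_ROUND_UP(2304, 64) = 36 ILT lines. The standalone check below is illustrative only, not part of the patch, and assumes sizeof(union cdu_context) equals its 1024-byte pad:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* mirrors the kernel macro */

int main(void)
{
	unsigned int cdu_ilt_page_sz = 8192 << 3;		/* CDU_ILT_PAGE_SZ: 64 KiB   */
	unsigned int ilt_page_cids = cdu_ilt_page_sz / 1024;	/* ILT_PAGE_CIDS             */
	unsigned int cnic_cid_max = 256 + 2048;			/* iSCSI + FCoE CIDs         */

	printf("contexts per ILT line: %u\n", ilt_page_cids);	/* 64 */
	printf("CNIC ILT lines: %u\n",
	       DIV_ROUND_UP(cnic_cid_max, ilt_page_cids));	/* 36 */
	return 0;
}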
}; + + /* + * Event queue (EQ or event ring) MC hsi + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 + */ + #define NUM_EQ_PAGES 1 + #define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) + #define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) + #define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) + #define EQ_DESC_MASK (NUM_EQ_DESC - 1) + #define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + + /* depends on EQ_DESC_CNT_PAGE being a power of 2 */ + #define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ + (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) + + /* depends on the above and on NUM_EQ_PAGES being a power of 2 */ + #define EQ_DESC(x) ((x) & EQ_DESC_MASK) + + #define BNX2X_EQ_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_EQ_CONS]) + + /* This is a data that will be used to create a link report message. + * We will keep the data used for the last link report in order + * to prevent reporting the same link parameters twice. + */ + struct bnx2x_link_report_data { + u16 line_speed; /* Effective line speed */ + unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ + }; + + enum { + BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON, + }; + + enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, + }; + + struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; + }; + + struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; + }; + + /* Public slow path states */ + enum { + BNX2X_SP_RTNL_SETUP_TC, + BNX2X_SP_RTNL_TX_TIMEOUT, + }; + + + struct bnx2x { + /* Fields used in the tx and intr/napi performance paths + * are grouped together in the beginning of the structure + */ + struct bnx2x_fastpath *fp; + void __iomem *regview; + void __iomem *doorbells; + u16 db_size; + + u8 pf_num; /* absolute PF number */ + u8 pfid; /* per-path PF number */ + int base_fw_ndsb; /**/ + #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) + #define BP_PORT(bp) (bp->pfid & 1) + #define BP_FUNC(bp) (bp->pfid) + #define BP_ABS_FUNC(bp) (bp->pf_num) + #define BP_VN(bp) ((bp)->pfid >> 1) + #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) + #define BP_L_ID(bp) (BP_VN(bp) << 2) + #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) + #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) + + struct net_device *dev; + struct pci_dev *pdev; + + const struct iro *iro_arr; + #define IRO (bp->iro_arr) + + enum bnx2x_recovery_state recovery_state; + int is_leader; + struct msix_entry *msix_table; + + int tx_ring_size; + + /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ + #define ETH_OVREHEAD (ETH_HLEN + 8 + 8) + #define ETH_MIN_PACKET_SIZE 60 + #define ETH_MAX_PACKET_SIZE 1500 + #define ETH_MAX_JUMBO_PACKET_SIZE 9600 + + /* Max supported alignment is 256 (8 shift) */ + #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? 
\ + L1_CACHE_SHIFT : 8) + /* FW use 2 Cache lines Alignment for start packet and size */ + #define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) + #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + + struct host_sp_status_block *def_status_blk; + #define DEF_SB_IGU_ID 16 + #define DEF_SB_ID HC_SP_SB_ID + __le16 def_idx; + __le16 def_att_idx; + u32 attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* slow path ring */ + struct eth_spe *spq; + dma_addr_t spq_mapping; + u16 spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + __le16 *dsb_sp_prod; + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ + /* used to synchronize spq accesses */ + spinlock_t spq_lock; + + /* event queue */ + union event_ring_elem *eq_ring; + dma_addr_t eq_mapping; + u16 eq_prod; + u16 eq_cons; + __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ + + + + /* Counter for marking that there is a STAT_QUERY ramrod pending */ + u16 stats_pending; + /* Counter for completed statistics ramrods */ + u16 stats_comp; + + /* End of fields used in the performance code paths */ + + int panic; + int msg_enable; + + u32 flags; + #define PCIX_FLAG (1 << 0) + #define PCI_32BIT_FLAG (1 << 1) + #define ONE_PORT_FLAG (1 << 2) + #define NO_WOL_FLAG (1 << 3) + #define USING_DAC_FLAG (1 << 4) + #define USING_MSIX_FLAG (1 << 5) + #define USING_MSI_FLAG (1 << 6) + #define DISABLE_MSI_FLAG (1 << 7) + #define TPA_ENABLE_FLAG (1 << 8) + #define NO_MCP_FLAG (1 << 9) + + #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) + #define MF_FUNC_DIS (1 << 11) + #define OWN_CNIC_IRQ (1 << 12) + #define NO_ISCSI_OOO_FLAG (1 << 13) + #define NO_ISCSI_FLAG (1 << 14) + #define NO_FCOE_FLAG (1 << 15) + + #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) + #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) + #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) + + int pm_cap; + int mrrs; + + struct delayed_work sp_task; + struct delayed_work sp_rtnl_task; + + struct delayed_work period_task; + struct timer_list timer; + int current_interval; + + u16 fw_seq; + u16 fw_drv_pulse_wr_seq; + u32 func_stx; + + struct link_params link_params; + struct link_vars link_vars; + u32 link_cnt; + struct bnx2x_link_report_data last_reported_link; + + struct mdio_if_info mdio; + + struct bnx2x_common common; + struct bnx2x_port port; + + struct cmng_struct_per_port cmng; + u32 vn_weight_sum; + u32 mf_config[E1HVN_MAX]; + u32 mf2_config[E2_FUNC_MAX]; + u32 path_has_ovlan; /* E3 */ + u16 mf_ov; + u8 mf_mode; + #define IS_MF(bp) (bp->mf_mode != 0) + #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) + #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) + + u8 wol; + + int rx_ring_size; + + u16 tx_quick_cons_trip_int; + u16 tx_quick_cons_trip; + u16 tx_ticks_int; + u16 tx_ticks; + + u16 rx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_ticks_int; + u16 rx_ticks; + /* Maximal coalescing timeout in us */ + #define BNX2X_MAX_COALESCE_TOUT (0xf0*12) + + u32 lin_cnt; + + u16 state; + #define BNX2X_STATE_CLOSED 0 + #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 + #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 + #define BNX2X_STATE_OPEN 0x3000 + #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 + #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 + + #define BNX2X_STATE_DIAG 0xe000 + #define BNX2X_STATE_ERROR 0xf000 + + int multi_mode; + #define BNX2X_MAX_PRIORITY 8 + #define BNX2X_MAX_ENTRIES_PER_PRI 16 + #define BNX2X_MAX_COS 3 + #define BNX2X_MAX_TX_COS 2 + int num_queues; + int disable_tpa; + + 
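The BNX2X_STATE_* values above gate most slow-path work: a typical caller bails out unless the device has reached BNX2X_STATE_OPEN. A minimal sketch of that pattern follows; bnx2x_sketch_reconfig() is a hypothetical helper, not taken from this patch, and DP()/NETIF_MSG_IFUP are simply the driver's usual debug facilities:

/* Hedged sketch: refuse to reconfigure the device unless it is fully up.
 * Mirrors how the rx-mode and coalescing paths consult bp->state.
 */
static int bnx2x_sketch_reconfig(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EAGAIN;
	}
	/* ... safe to issue ramrods / touch coalescing registers here ... */
	return 0;
}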
u32 rx_mode; + #define BNX2X_RX_MODE_NONE 0 + #define BNX2X_RX_MODE_NORMAL 1 + #define BNX2X_RX_MODE_ALLMULTI 2 + #define BNX2X_RX_MODE_PROMISC 3 + #define BNX2X_MAX_MULTICAST 64 + + u8 igu_dsb_id; + u8 igu_base_sb; + u8 igu_sb_cnt; + dma_addr_t def_status_blk_mapping; + + struct bnx2x_slowpath *slowpath; + dma_addr_t slowpath_mapping; + + /* Total number of FW statistics requests */ + u8 fw_stats_num; + + /* + * This is a memory buffer that will contain both statistics + * ramrod request and data. + */ + void *fw_stats; + dma_addr_t fw_stats_mapping; + + /* + * FW statistics request shortcut (points at the + * beginning of fw_stats buffer). + */ + struct bnx2x_fw_stats_req *fw_stats_req; + dma_addr_t fw_stats_req_mapping; + int fw_stats_req_sz; + + /* + * FW statistics data shortcut (points at the begining of + * fw_stats buffer + fw_stats_req_sz). + */ + struct bnx2x_fw_stats_data *fw_stats_data; + dma_addr_t fw_stats_data_mapping; + int fw_stats_data_sz; + + struct hw_context context; + + struct bnx2x_ilt *ilt; + #define BP_ILT(bp) ((bp)->ilt) + #define ILT_MAX_LINES 256 + /* + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes + * to CNIC. + */ + #define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) + + /* + * Maximum CID count that might be required by the bnx2x: + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) + */ + #define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) + #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ + ILT_PAGE_CIDS)) + #define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) + + int qm_cid_count; + + int dropless_fc; + + #ifdef BCM_CNIC + u32 cnic_flags; + #define BNX2X_CNIC_FLAG_MAC_SET 1 + void *t2; + dma_addr_t t2_mapping; + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; + u32 cnic_tag; + struct cnic_eth_dev cnic_eth_dev; + union host_hc_status_block cnic_sb; + dma_addr_t cnic_sb_mapping; + struct eth_spe *cnic_kwq; + struct eth_spe *cnic_kwq_prod; + struct eth_spe *cnic_kwq_cons; + struct eth_spe *cnic_kwq_last; + u16 cnic_kwq_pending; + u16 cnic_spq_pending; + u8 fip_mac[ETH_ALEN]; + struct mutex cnic_mutex; + struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; + + /* Start index of the "special" (CNIC related) L2 cleints */ + u8 cnic_base_cl_id; + #endif + + int dmae_ready; + /* used to synchronize dmae accesses */ + spinlock_t dmae_lock; + + /* used to protect the FW mail box */ + struct mutex fw_mb_mutex; + + /* used to synchronize stats collecting */ + int stats_state; + + /* used for synchronization of concurrent threads statistics handling */ + spinlock_t stats_lock; + + /* used by dmae command loader */ + struct dmae_command stats_dmae; + int executer_idx; + + u16 stats_counter; + struct bnx2x_eth_stats eth_stats; + + struct z_stream_s *strm; + void *gunzip_buf; + dma_addr_t gunzip_mapping; + int gunzip_outlen; + #define FW_BUF_SIZE 0x8000 + #define GUNZIP_BUF(bp) (bp->gunzip_buf) + #define GUNZIP_PHYS(bp) (bp->gunzip_mapping) + #define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) + + struct raw_op *init_ops; + /* Init blocks offsets inside init_ops */ + u16 *init_ops_offsets; + /* Data blob - has 32 bit granularity */ + u32 *init_data; + u32 init_mode_flags; + #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) + /* Zipped PRAM blobs - raw data */ + const u8 *tsem_int_table_data; + const u8 *tsem_pram_data; + const u8 *usem_int_table_data; + const u8 *usem_pram_data; + const u8 *xsem_int_table_data; + const 
u8 *xsem_pram_data; + const u8 *csem_int_table_data; + const u8 *csem_pram_data; + #define INIT_OPS(bp) (bp->init_ops) + #define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) + #define INIT_DATA(bp) (bp->init_data) + #define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) + #define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) + #define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) + #define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) + #define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) + #define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) + #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) + #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) + + #define PHY_FW_VER_LEN 20 + char fw_ver[32]; + const struct firmware *firmware; + + /* DCB support on/off */ + u16 dcb_state; + #define BNX2X_DCB_STATE_OFF 0 + #define BNX2X_DCB_STATE_ON 1 + + /* DCBX engine mode */ + int dcbx_enabled; + #define BNX2X_DCBX_ENABLED_OFF 0 + #define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 + #define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 + #define BNX2X_DCBX_ENABLED_INVALID (-1) + + bool dcbx_mode_uset; + + struct bnx2x_config_dcbx_params dcbx_config_params; + struct bnx2x_dcbx_port_params dcbx_port_params; + int dcb_version; + + /* CAM credit pools */ + struct bnx2x_credit_pool_obj macs_pool; + + /* RX_MODE object */ + struct bnx2x_rx_mode_obj rx_mode_obj; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* Function State controlling object */ + struct bnx2x_func_sp_obj func_obj; + + unsigned long sp_state; + + /* operation indication for the sp_rtnl task */ + unsigned long sp_rtnl_state; + + /* DCBX Negotation results */ + struct dcbx_features dcbx_local_feat; + u32 dcbx_error; + + #ifdef BCM_DCBNL + struct dcbx_features dcbx_remote_feat; + u32 dcbx_remote_flags; + #endif + u32 pending_max; + + /* multiple tx classes of service */ + u8 max_cos; + + /* priority to cos mapping */ + u8 prio_to_cos[8]; + }; + + /* Tx queues may be less or equal to Rx queues */ + extern int num_queues; + #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) + #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) + #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) + + #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) + + #define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) + /* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ + + #define RSS_IPV4_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY + + #define RSS_IPV4_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY + + #define RSS_IPV6_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY + + #define RSS_IPV6_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY + + /* func init flags */ + #define FUNC_FLG_RSS 0x0001 + #define FUNC_FLG_STATS 0x0002 + /* removed FUNC_FLG_UNMATCHED 0x0004 */ + #define FUNC_FLG_TPA 0x0008 + #define FUNC_FLG_SPQ 0x0010 + #define FUNC_FLG_LEADING 0x0020 /* PF only */ + + + struct bnx2x_func_init_params { + /* dma */ + dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ + dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + + u16 func_flgs; + u16 func_id; /* abs fid */ + u16 pf_id; + u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ + }; + + #define for_each_eth_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + + #define for_each_nondefault_eth_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); 
(var)++) + + #define for_each_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + + /* Skip forwarding FP */ + #define for_each_rx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + + /* Skip OOO FP */ + #define for_each_tx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + + #define for_each_nondefault_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + + #define for_each_cos_in_tx_queue(fp, var) \ + for ((var) = 0; (var) < (fp)->max_cos; (var)++) + + /* skip rx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ + #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + /* skip tx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ + #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + + + + /** + * bnx2x_set_mac_one - configure a single MAC address + * + * @bp: driver handle + * @mac: MAC to configure + * @obj: MAC object handle + * @set: if 'true' add a new MAC, otherwise - delete + * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) + * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) + * + * Configures one MAC according to provided parameters or continues the + * execution of previously scheduled commands if RAMROD_CONT is set in + * ramrod_flags. + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. + */ + int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags); + /** + * Deletes all MACs configured for the specific MAC object. + * + * @param bp Function driver instance + * @param mac_obj MAC object to cleanup + * + * @return zero if all MACs were cleaned + */ + + /** + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object + * + * @bp: driver handle + * @mac_obj: MAC object handle + * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) + * @wait_for_comp: if 'true' block until completion + * + * Deletes all MACs of the specific type (e.g. ETH, UC list). + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. 
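A hedged usage sketch of bnx2x_set_mac_one(): this mirrors how the driver programs its primary Ethernet MAC into the leading queue's mac_obj, but the flag and MAC-type constants (RAMROD_COMP_WAIT, BNX2X_ETH_MAC) come from the slow-path headers and are assumed here rather than shown in this hunk:

/* Illustrative only: add (or remove) the netdev's MAC on the leading queue's
 * MAC object and wait for the ramrod to complete.  Error handling is minimal.
 */
static int bnx2x_sketch_set_primary_mac(struct bnx2x *bp, bool set)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
				 set, BNX2X_ETH_MAC, &ramrod_flags);
}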
+ */ + int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp); + + /* Init Function API */ + void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); + int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); + int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); + int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); + int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); + void bnx2x_read_mf_cfg(struct bnx2x *bp); + + + /* dmae */ + void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); + void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32); + void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); + u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); + u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); + u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type); + + + void bnx2x_calc_fc_adv(struct bnx2x *bp); + int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type); + void bnx2x_update_coalesce(struct bnx2x *bp); + int bnx2x_get_cur_phy_idx(struct bnx2x *bp); + + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, + int wait) + { + u32 val; + + do { + val = REG_RD(bp, reg); + if (val == expected) + break; + ms -= wait; + msleep(wait); + + } while (ms > 0); + + return val; + } + + #define BNX2X_ILT_ZALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + memset(x, 0, size); \ + } while (0) + + #define BNX2X_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + + #define ILOG2(x) (ilog2((x))) + + #define ILT_NUM_PAGE_ENTRIES (3072) + /* In 57710/11 we use whole table since we have 8 func + * In 57712 we have only 4 func, but use same size per func, then only half of + * the table in use + */ + #define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) + + #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) + /* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ + #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) + #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) + + /* load/unload mode */ + #define LOAD_NORMAL 0 + #define LOAD_OPEN 1 + #define LOAD_DIAG 2 + #define UNLOAD_NORMAL 0 + #define UNLOAD_CLOSE 1 + #define UNLOAD_RECOVERY 2 + + + /* DMAE command defines */ + #define DMAE_TIMEOUT -1 + #define DMAE_PCI_ERROR -2 /* E2 and onward */ + #define DMAE_NOT_RDY -3 + #define DMAE_PCI_ERR_FLAG 0x80000000 + + #define DMAE_SRC_PCI 0 + #define DMAE_SRC_GRC 1 + + #define DMAE_DST_NONE 0 + #define DMAE_DST_PCI 1 + #define DMAE_DST_GRC 2 + + #define DMAE_COMP_PCI 0 + #define DMAE_COMP_GRC 1 + + /* E2 and onward - PCI error handling in the completion */ + + #define DMAE_COMP_REGULAR 0 + #define DMAE_COM_SET_ERR 1 + + #define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ + DMAE_COMMAND_SRC_SHIFT) + #define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ + DMAE_COMMAND_SRC_SHIFT) + + #define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ + DMAE_COMMAND_DST_SHIFT) + #define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ + DMAE_COMMAND_DST_SHIFT) + + #define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ + DMAE_COMMAND_C_DST_SHIFT) + #define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC 
<< \ + DMAE_COMMAND_C_DST_SHIFT) + + #define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE + + #define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) + #define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + + #define DMAE_CMD_PORT_0 0 + #define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + + #define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET + #define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET + #define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT + + #define DMAE_SRC_PF 0 + #define DMAE_SRC_VF 1 + + #define DMAE_DST_PF 0 + #define DMAE_DST_VF 1 + + #define DMAE_C_SRC 0 + #define DMAE_C_DST 1 + + #define DMAE_LEN32_RD_MAX 0x80 + #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) + + #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit + indicates eror */ + + #define MAX_DMAE_C_PER_PORT 8 + #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + BP_VN(bp)) + #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + E1HVN_MAX) + + /* PCIE link and speed */ + #define PCICFG_LINK_WIDTH 0x1f00000 + #define PCICFG_LINK_WIDTH_SHIFT 20 + #define PCICFG_LINK_SPEED 0xf0000 + #define PCICFG_LINK_SPEED_SHIFT 16 + + + #define BNX2X_NUM_TESTS 7 + + #define BNX2X_PHY_LOOPBACK 0 + #define BNX2X_MAC_LOOPBACK 1 + #define BNX2X_PHY_LOOPBACK_FAILED 1 + #define BNX2X_MAC_LOOPBACK_FAILED 2 + #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ + BNX2X_PHY_LOOPBACK_FAILED) + + + #define STROM_ASSERT_ARRAY_SIZE 50 + + + /* must be used on a CID before placing it on a HW ring */ + #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ + (x)) + + #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) + #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) + + + #define BNX2X_BTR 4 + #define MAX_SPQ_PENDING 8 + + /* CMNG constants, as derived from system spec calculations */ + /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ + #define DEF_MIN_RATE 100 + /* resolution of the rate shaping timer - 400 usec */ + #define RS_PERIODIC_TIMEOUT_USEC 400 + /* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ + #define QM_ARB_BYTES 160000 + /* resolution of Min algorithm 1:100 */ + #define MIN_RES 100 + /* how many bytes above threshold for the minimal credit of Min algorithm*/ + #define MIN_ABOVE_THRESH 32768 + /* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ + #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + /* Memory of fairness algorithm . 
2 cycles */ + #define FAIR_MEM 2 + + + #define ATTN_NIG_FOR_FUNC (1L << 8) + #define ATTN_SW_TIMER_4_FUNC (1L << 9) + #define GPIO_2_FUNC (1L << 10) + #define GPIO_3_FUNC (1L << 11) + #define GPIO_4_FUNC (1L << 12) + #define ATTN_GENERAL_ATTN_1 (1L << 13) + #define ATTN_GENERAL_ATTN_2 (1L << 14) + #define ATTN_GENERAL_ATTN_3 (1L << 15) + #define ATTN_GENERAL_ATTN_4 (1L << 13) + #define ATTN_GENERAL_ATTN_5 (1L << 14) + #define ATTN_GENERAL_ATTN_6 (1L << 15) + + #define ATTN_HARD_WIRED_MASK 0xff00 + #define ATTENTION_ID 4 + + + /* stuff added to make the code fit 80Col */ + + #define BNX2X_PMF_LINK_ASSERT \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) + + #define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + + #define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + + #define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) + #define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + + #define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) + #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) + #define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) + #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) + #define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) + #define 
HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + + #define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + + #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + + #define RSS_FLAGS(bp) \ + (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ + (bp->multi_mode << \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) + #define MULTI_MASK 0x7f + + + #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) + #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) + #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) + #define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) + + #define DEF_USB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_u, igu_index) + #define DEF_CSB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_c, igu_index) + #define DEF_XSB_IGU_INDEX_OFF \ + offsetof(struct xstorm_def_status_block, igu_index) + #define DEF_TSB_IGU_INDEX_OFF \ + offsetof(struct tstorm_def_status_block, igu_index) + + #define DEF_USB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_u, segment) + #define DEF_CSB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_c, segment) + #define DEF_XSB_SEGMENT_OFF \ + offsetof(struct xstorm_def_status_block, segment) + #define DEF_TSB_SEGMENT_OFF \ + offsetof(struct tstorm_def_status_block, segment) + + #define BNX2X_SP_DSB_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_DEF_CONS]) + + #define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + + #define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + + #define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + + #define CAM_IS_INVALID(x) \ + (GET_FLAG(x.flags, \ + MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ + (T_ETH_MAC_COMMAND_INVALIDATE)) + + /* Number of u32 elements in MC hash array */ + #define MC_HASH_SIZE 8 + #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) + + + #ifndef PXP2_REG_PXP2_INT_STS + #define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 + #endif + + #ifndef ETH_MAX_RX_CLIENTS_E2 + #define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H + #endif + + #define BNX2X_VPD_LEN 128 + #define VENDOR_ID_LEN 4 + + /* Congestion management fairness mode */ + #define CMNG_FNS_NONE 0 + #define CMNG_FNS_MINMAX 1 + + #define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ + #define HC_SEG_ACCESS_ATTN 4 + #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ + + static const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, 
DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 + }; + + void bnx2x_set_ethtool_ops(struct net_device *netdev); + void bnx2x_notify_link_changed(struct bnx2x *bp); + #endif /* bnx2x.h */ diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0000000,5b1f9b5..283d663 mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@@ -1,0 -1,1491 +1,1491 @@@ + /* bnx2x_cmn.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein eilong@broadcom.com + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + #ifndef BNX2X_CMN_H + #define BNX2X_CMN_H + + #include <linux/types.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + + + #include "bnx2x.h" + + /* This is used as a replacement for an MCP if it's not present */ + extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ + + extern int num_queues; + + /************************ Macros ********************************/ + #define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + + #define BNX2X_FREE(x) \ + do { \ + if (x) { \ + kfree((void *)x); \ + x = NULL; \ + } \ + } while (0) + + #define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0, size); \ + } while (0) + + #define BNX2X_ALLOC(x, size) \ + do { \ + x = kzalloc(size, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + } while (0) + + /*********************** Interfaces **************************** + * Functions that need to be implemented by each driver version + */ + /* Init */ + + /** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ + u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); + + /** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + */ + void bnx2x_send_unload_done(struct bnx2x *bp); + + /** + * bnx2x_config_rss_pf - configure RSS parameters. + * + * @bp: driver handle + * @ind_table: indirection table to configure + * @config_hash: re-configure RSS hash keys configuration + */ + int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); + + /** + * bnx2x__init_func_obj - init function object + * + * @bp: driver handle + * + * Initializes the Function Object with the appropriate + * parameters which include a function slow path driver + * interface. + */ + void bnx2x__init_func_obj(struct bnx2x *bp); + + /** + * bnx2x_setup_queue - setup eth queue. 
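The BNX2X_PCI_ALLOC()/BNX2X_ALLOC() helpers in the macro block above jump to a local "alloc_mem_err" label on failure, so callers pair them with a cleanup path. A minimal sketch of that contract, using only fields declared in bnx2x.h (the SPQ ring); bnx2x_sketch_alloc() itself is hypothetical:

static int bnx2x_sketch_alloc(struct bnx2x *bp)
{
	/* zeroed DMA-coherent page for the slow-path queue */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
	return 0;

alloc_mem_err:
	/* BNX2X_PCI_FREE() is NULL-safe, so partial failures unwind cleanly */
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
	return -ENOMEM;
}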
+ * + * @bp: driver handle + * @fp: pointer to the fastpath structure + * @leading: boolean + * + */ + int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading); + + /** + * bnx2x_setup_leading - bring up a leading eth queue. + * + * @bp: driver handle + */ + int bnx2x_setup_leading(struct bnx2x *bp); + + /** + * bnx2x_fw_command - send the MCP a request + * + * @bp: driver handle + * @command: request + * @param: request's parameter + * + * block until there is a reply + */ + u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); + + /** + * bnx2x_initial_phy_init - initialize link parameters structure variables. + * + * @bp: driver handle + * @load_mode: current mode + */ + u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); + + /** + * bnx2x_link_set - configure hw according to link parameters structure. + * + * @bp: driver handle + */ + void bnx2x_link_set(struct bnx2x *bp); + + /** + * bnx2x_link_test - query link status. + * + * @bp: driver handle + * @is_serdes: bool + * + * Returns 0 if link is UP. + */ + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); + + /** + * bnx2x_drv_pulse - write driver pulse to shmem + * + * @bp: driver handle + * + * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox + * in the shmem. + */ + void bnx2x_drv_pulse(struct bnx2x *bp); + + /** + * bnx2x_igu_ack_sb - update IGU with current SB value + * + * @bp: driver handle + * @igu_sb_id: SB id + * @segment: SB segment + * @index: SB index + * @op: SB operation + * @update: is HW update required + */ + void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update); + + /* Disable transactions from chip to host */ + void bnx2x_pf_disable(struct bnx2x *bp); + + /** + * bnx2x__link_status_update - handles link status change. + * + * @bp: driver handle + */ + void bnx2x__link_status_update(struct bnx2x *bp); + + /** + * bnx2x_link_report - report link status to upper layer. + * + * @bp: driver handle + */ + void bnx2x_link_report(struct bnx2x *bp); + + /* None-atomic version of bnx2x_link_report() */ + void __bnx2x_link_report(struct bnx2x *bp); + + /** + * bnx2x_get_mf_speed - calculate MF speed. + * + * @bp: driver handle + * + * Takes into account current linespeed and MF configuration. + */ + u16 bnx2x_get_mf_speed(struct bnx2x *bp); + + /** + * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ + irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); + + /** + * bnx2x_interrupt - non MSI-X interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ + irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); + #ifdef BCM_CNIC + + /** + * bnx2x_cnic_notify - send command to cnic driver + * + * @bp: driver handle + * @cmd: command + */ + int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); + + /** + * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information + * + * @bp: driver handle + */ + void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); + #endif + + /** + * bnx2x_int_enable - enable HW interrupts. + * + * @bp: driver handle + */ + void bnx2x_int_enable(struct bnx2x *bp); + + /** + * bnx2x_int_disable_sync - disable interrupts. + * + * @bp: driver handle + * @disable_hw: true, disable HW interrupts. + * + * This function ensures that there are no + * ISRs or SP DPCs (sp_task) are running after it returns. 
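The two interrupt entry points declared earlier in this block, bnx2x_msix_sp_int() for the MSI-X slow-path vector and bnx2x_interrupt() for INTx/MSI, are wired up with ordinary request_irq() calls. A hedged sketch of that wiring (the real request code lives in bnx2x_cmn.c, and the flags used there may differ; <linux/interrupt.h> is assumed):

/* Illustrative only: msix_table[0] is assumed to be the slow-path vector. */
static int bnx2x_sketch_req_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		return request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int,
				   0, bp->dev->name, bp->dev);

	return request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			   bp->dev->name, bp->dev);
}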
+ */ + void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); + + /** + * bnx2x_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - rings + * - status blocks + * - etc. + */ + void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); + + /** + * bnx2x_alloc_mem - allocate driver's memory. + * + * @bp: driver handle + */ + int bnx2x_alloc_mem(struct bnx2x *bp); + + /** + * bnx2x_free_mem - release driver's memory. + * + * @bp: driver handle + */ + void bnx2x_free_mem(struct bnx2x *bp); + + /** + * bnx2x_set_num_queues - set number of queues according to mode. + * + * @bp: driver handle + */ + void bnx2x_set_num_queues(struct bnx2x *bp); + + /** + * bnx2x_chip_cleanup - cleanup chip internals. + * + * @bp: driver handle + * @unload_mode: COMMON, PORT, FUNCTION + * + * - Cleanup MAC configuration. + * - Closes clients. + * - etc. + */ + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); + + /** + * bnx2x_acquire_hw_lock - acquire HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ + int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); + + /** + * bnx2x_release_hw_lock - release HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ + int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); + + /** + * bnx2x_release_leader_lock - release recovery leader lock + * + * @bp: driver handle + */ + int bnx2x_release_leader_lock(struct bnx2x *bp); + + /** + * bnx2x_set_eth_mac - configure eth MAC address in the HW + * + * @bp: driver handle + * @set: set or clear + * + * Configures according to the value in netdev->dev_addr. + */ + int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); + + /** + * bnx2x_set_rx_mode - set MAC filtering configurations. + * + * @dev: netdevice + * + * called with netif_tx_lock from dev_mcast.c + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh() + */ + void bnx2x_set_rx_mode(struct net_device *dev); + + /** + * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. + * + * @bp: driver handle + * + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh(). + */ + void bnx2x_set_storm_rx_mode(struct bnx2x *bp); + + /** + * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. + * + * @bp: driver handle + * @cl_id: client id + * @rx_mode_flags: rx mode configuration + * @rx_accept_flags: rx accept configuration + * @tx_accept_flags: tx accept configuration (tx switch) + * @ramrod_flags: ramrod configuration + */ + void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags); + + /* Parity errors related */ + void bnx2x_inc_load_cnt(struct bnx2x *bp); + u32 bnx2x_dec_load_cnt(struct bnx2x *bp); + bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); + bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); + void bnx2x_set_reset_in_progress(struct bnx2x *bp); + void bnx2x_set_reset_global(struct bnx2x *bp); + void bnx2x_disable_close_the_gate(struct bnx2x *bp); + + /** + * bnx2x_sp_event - handle ramrods completion. + * + * @fp: fastpath handle for the event + * @rr_cqe: eth_rx_cqe + */ + void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + + /** + * bnx2x_ilt_set_info - prepare ILT configurations. 
+ * + * @bp: driver handle + */ + void bnx2x_ilt_set_info(struct bnx2x *bp); + + /** + * bnx2x_dcbx_init - initialize dcbx protocol. + * + * @bp: driver handle + */ + void bnx2x_dcbx_init(struct bnx2x *bp); + + /** + * bnx2x_set_power_state - set power state to the requested value. + * + * @bp: driver handle + * @state: required state D0 or D3hot + * + * Currently only D0 and D3hot are supported. + */ + int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + + /** + * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. + * + * @bp: driver handle + * @value: new value + */ + void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); + /* Error handling */ + void bnx2x_panic_dump(struct bnx2x *bp); + + void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); + + /* dev_close main block */ + int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); + + /* dev_open main block */ + int bnx2x_nic_load(struct bnx2x *bp, int load_mode); + + /* hard_xmit callback */ + netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + + /* setup_tc callback */ + int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); + + /* select_queue callback */ + u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); + + /* reload helper */ + int bnx2x_reload_if_running(struct net_device *dev); + + int bnx2x_change_mac_addr(struct net_device *dev, void *p); + + /* NAPI poll Rx part */ + int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); + + void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); + + /* NAPI poll Tx part */ + int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); + + /* suspend/resume callbacks */ + int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); + int bnx2x_resume(struct pci_dev *pdev); + + /* Release IRQ vectors */ + void bnx2x_free_irq(struct bnx2x *bp); + + void bnx2x_free_fp_mem(struct bnx2x *bp); + int bnx2x_alloc_fp_mem(struct bnx2x *bp); + void bnx2x_init_rx_rings(struct bnx2x *bp); + void bnx2x_free_skbs(struct bnx2x *bp); + void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); + void bnx2x_netif_start(struct bnx2x *bp); + + /** + * bnx2x_enable_msix - set msix configuration. + * + * @bp: driver handle + * + * fills msix_table, requests vectors, updates num_queues + * according to number of available vectors. 
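Several of the prototypes above are netdev callbacks; a hedged sketch of how they slot into a net_device_ops table is shown below. The authoritative table lives in bnx2x_main.c, so treat the member list here as illustrative rather than complete:

/* Illustrative wiring only; not the driver's actual ops table. */
static const struct net_device_ops bnx2x_sketch_netdev_ops = {
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
};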
+ */ + int bnx2x_enable_msix(struct bnx2x *bp); + + /** + * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly + * + * @bp: driver handle + */ + int bnx2x_enable_msi(struct bnx2x *bp); + + /** + * bnx2x_poll - NAPI callback + * + * @napi: napi structure + * @budget: + * + */ + int bnx2x_poll(struct napi_struct *napi, int budget); + + /** + * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure + * + * @bp: driver handle + */ + int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); + + /** + * bnx2x_free_mem_bp - release memories outsize main driver structure + * + * @bp: driver handle + */ + void bnx2x_free_mem_bp(struct bnx2x *bp); + + /** + * bnx2x_change_mtu - change mtu netdev callback + * + * @dev: net device + * @new_mtu: requested mtu + * + */ + int bnx2x_change_mtu(struct net_device *dev, int new_mtu); + + #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) + /** + * bnx2x_fcoe_get_wwn - return the requested WWN value for this port + * + * @dev: net_device + * @wwn: output buffer + * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) + * + */ + int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); + #endif + u32 bnx2x_fix_features(struct net_device *dev, u32 features); + int bnx2x_set_features(struct net_device *dev, u32 features); + + /** + * bnx2x_tx_timeout - tx timeout netdev callback + * + * @dev: net device + */ + void bnx2x_tx_timeout(struct net_device *dev); + + /*********************** Inlines **********************************/ + /*********************** Fast path ********************************/ + static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) + { + barrier(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; + } + + static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 bd_prod, + u16 rx_comp_prod, u16 rx_sge_prod, u32 start) + { + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. 
+ */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); + } + + static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, + u8 segment, u16 index, u8 op, + u8 update, u32 igu_addr) + { + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr); + REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); + } + + static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, + u8 idu_sb_id, bool is_Pf) + { + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | + ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } + } + + static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) + { + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); + } + + static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, + u16 index, u8 op, u8 update) + { + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); + else { + u8 segment; + + if (CHIP_INT_MODE_IS_BC(bp)) + segment = storm; + else if (igu_sb_id != bp->igu_dsb_id) + segment = IGU_SEG_ACCESS_DEF; + else if (storm == ATTENTION_ID) + segment = IGU_SEG_ACCESS_ATTN; + else + segment = IGU_SEG_ACCESS_DEF; + bnx2x_igu_ack_sb(bp, igu_sb_id, 
segment, index, op, update); + } + } + + static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) + { + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + + barrier(); + return result; + } + + static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) + { + u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); + u32 result = REG_RD(bp, igu_addr); + + DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", + result, igu_addr); + + barrier(); + return result; + } + + static inline u16 bnx2x_ack_int(struct bnx2x *bp) + { + barrier(); + if (bp->common.int_block == INT_BLOCK_HC) + return bnx2x_hc_ack_int(bp); + else + return bnx2x_igu_ack_int(bp); + } + + static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) + { + /* Tell compiler that consumer and producer can change */ + barrier(); + return txdata->tx_pkt_prod != txdata->tx_pkt_cons; + } + + static inline u16 bnx2x_tx_avail(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) + { + s16 used; + u16 prod; + u16 cons; + + prod = txdata->tx_bd_prod; + cons = txdata->tx_bd_cons; + + /* NUM_TX_RINGS = number of "next-page" entries + It will be used as a threshold */ + used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + + #ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > bp->tx_ring_size); + WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); + #endif + + return (s16)(bp->tx_ring_size) - used; + } + + static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) + { + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + return hw_cons != txdata->tx_pkt_cons; + } + + static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) + { + u8 cos; + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + return true; + return false; + } + + static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) + { + u16 rx_cons_sb; + + /* Tell compiler that status block fields can change */ + barrier(); + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + return (fp->rx_comp_cons != rx_cons_sb); + } + + /** + * bnx2x_tx_disable - disables tx from stack point of view + * + * @bp: driver handle + */ + static inline void bnx2x_tx_disable(struct bnx2x *bp) + { + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); + } + + static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; + } + + static inline void bnx2x_add_all_napi(struct bnx2x *bp) + { + int i; + + /* Add NAPI objects */ + for_each_rx_queue(bp, i) + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, BNX2X_NAPI_WEIGHT); + } + + static inline void bnx2x_del_all_napi(struct bnx2x *bp) + { + int i; + + for_each_rx_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); + } + + static inline void bnx2x_disable_msi(struct bnx2x *bp) + { + if (bp->flags & USING_MSIX_FLAG) { + 
pci_disable_msix(bp->pdev); + bp->flags &= ~USING_MSIX_FLAG; + } else if (bp->flags & USING_MSI_FLAG) { + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + } + } + + static inline int bnx2x_calc_num_queues(struct bnx2x *bp) + { + return num_queues ? + min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : + min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); + } + + static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) + { + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); + idx--; + } + } + } + + static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) + { + /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); + + /* Clear the two last indices in the page to 1: + these are the indices that correspond to the "next" element, + hence will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) + return -ENOMEM; + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; + } + + static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) + { + struct sk_buff *skb; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + if (unlikely(skb == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + rx_buf->skb = skb; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; + } + + /* note that we are not allocating a new skb, + * we are just moving one from cons to prod + * we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). + */ + static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, + u16 cons, u16 prod) + { + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->skb = cons_rx_buf->skb; + *prod_bd = *cons_bd; + } + + /************************* Init ******************************************/ + + /** + * bnx2x_func_start - init function + * + * @bp: driver handle + * + * Must be called before sending CLIENT_SETUP for the first client. 
+ */ + static inline int bnx2x_func_start(struct bnx2x *bp) + { + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = bp->mf_mode; + start_params->sd_vlan_tag = bp->mf_ov; + if (CHIP_IS_E1x(bp)) + start_params->network_cos_mode = OVERRIDE_COS; + else + start_params->network_cos_mode = STATIC_COS; + + return bnx2x_func_state_change(bp, &func_params); + } + + + /** + * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format + * + * @fw_hi: pointer to upper part + * @fw_mid: pointer to middle part + * @fw_lo: pointer to lower part + * @mac: pointer to MAC address + */ + static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, + u8 *mac) + { + ((u8 *)fw_hi)[0] = mac[1]; + ((u8 *)fw_hi)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lo)[0] = mac[5]; + ((u8 *)fw_lo)[1] = mac[4]; + } + + static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) + { + int i; + + if (fp->disable_tpa) + return; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); + } + + static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) + { + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + struct sk_buff *skb = first_buf->skb; + + if (skb == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + first_buf->skb = NULL; + } + } + + static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) + { + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; + } + + static inline void bnx2x_init_tx_rings(struct bnx2x *bp) + { + int i; + u8 cos; + + for_each_tx_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); + } + + static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) + { + int i; + + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } + } + + static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) + { + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 
2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } + } + + static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) + { + int i; + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } + } + + /* Returns the number of actually allocated BDs */ + static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) + { + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during fo init so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + fp->eth_q_stats.rx_skb_alloc_failed++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); + } + + if (fp->eth_q_stats.rx_skb_alloc_failed) + BNX2X_ERR("was only able to allocate " + "%d rx skbs on queue[%d]\n", + (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + return i - fp->eth_q_stats.rx_skb_alloc_failed; + } + + /* Statistics ID are global per chip/path, while Client IDs for E1x are per + * port. + */ + static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) + { + if (!CHIP_IS_E1x(fp->bp)) + return fp->cl_id; + else + return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; + } + + static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, + bnx2x_obj_type obj_type) + { + struct bnx2x *bp = fp->bp; + + /* Configure classification DBs */ + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, obj_type, + &bp->macs_pool); + } + + /** + * bnx2x_get_path_func_num - get number of active functions + * + * @bp: driver handle + * + * Calculates the number of active (not hidden) functions on the + * current path. + */ + static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) + { + u8 func_num = 0, i; + + /* 57710 has only one function per-port */ + if (CHIP_IS_E1(bp)) + return 1; + + /* Calculate a number of functions enabled on the current + * PATH/PORT. + */ + if (CHIP_REV_IS_SLOW(bp)) { + if (IS_MF(bp)) + func_num = 4; + else + func_num = 2; + } else { + for (i = 0; i < E1H_FUNC_MAX / 2; i++) { + u32 func_config = + MF_CFG_RD(bp, + func_mf_config[BP_PORT(bp) + 2 * i]. + config); + func_num += + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); + } + } + + WARN_ON(!func_num); + + return func_num; + } + + static inline void bnx2x_init_bp_objs(struct bnx2x *bp) + { + /* RX_MODE controlling object */ + bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, + BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, mcast_rdata), + bnx2x_sp_mapping(bp, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + + /* Setup CAM credit pools */ + bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), + bnx2x_get_path_func_num(bp)); + + /* RSS configuration object */ + bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, + bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, rss_rdata), + bnx2x_sp_mapping(bp, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + } + + static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) + { + if (CHIP_IS_E1x(fp->bp)) + return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; + else + return fp->cl_id; + } + + static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) + { + struct bnx2x *bp = fp->bp; + + if (!CHIP_IS_E1x(bp)) + return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + } + + static inline void bnx2x_init_txdata(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) + { + txdata->cid = cid; + txdata->txq_index = txq_index; + txdata->tx_cons_sb = tx_cons_sb; + + DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n", + txdata->cid, txdata->txq_index); + } + + #ifdef BCM_CNIC + static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) + { + return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; ++ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; + } + + static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) + { + + /* the 'first' id is allocated for the cnic */ + return bp->base_fw_ndsb; + } + + static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) + { + return bp->igu_base_sb; + } + + + static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) + { + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!! 
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); + + DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " + "igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); + } + #endif + + static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) + { + int cnt = 1000; + + while (bnx2x_has_tx_work_unload(txdata)) { + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]: " + "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + txdata->txq_index, txdata->tx_pkt_prod, + txdata->tx_pkt_cons); + #ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; + #else + break; + #endif + } + cnt--; + usleep_range(1000, 1000); + } + + return 0; + } + + int bnx2x_get_link_cfg_idx(struct bnx2x *bp); + + static inline void __storm_memset_struct(struct bnx2x *bp, + u32 addr, size_t size, u32 *data) + { + int i; + for (i = 0; i < size/4; i++) + REG_WR(bp, addr + (i * 4), data[i]); + } + + static inline void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) + { + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); + } + + static inline void storm_memset_cmng(struct bnx2x *bp, + struct cmng_struct_per_port *cmng, + u8 port) + { + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)cmng); + } + + /** + * bnx2x_wait_sp_comp - wait for the outstanding SP commands. + * + * @bp: driver handle + * @mask: bits that need to be cleared + */ + static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) + { + int tout = 5000; /* Wait for 5 secs tops */ + + while (tout--) { + smp_mb(); + netif_addr_lock_bh(bp->dev); + if (!(bp->sp_state & mask)) { + netif_addr_unlock_bh(bp->dev); + return true; + } + netif_addr_unlock_bh(bp->dev); + + usleep_range(1000, 1000); + } + + smp_mb(); + + netif_addr_lock_bh(bp->dev); + if (bp->sp_state & mask) { + BNX2X_ERR("Filtering completion timed out. 
sp_state 0x%lx, " + "mask 0x%lx\n", bp->sp_state, mask); + netif_addr_unlock_bh(bp->dev); + return false; + } + netif_addr_unlock_bh(bp->dev); + + return true; + } + + /** + * bnx2x_set_ctx_validation - set CDU context validation values + * + * @bp: driver handle + * @cxt: context of the connection on the host memory + * @cid: SW CID of the connection to be configured + */ + void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid); + + void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec); + void bnx2x_acquire_phy_lock(struct bnx2x *bp); + void bnx2x_release_phy_lock(struct bnx2x *bp); + + /** + * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. + * + * @bp: driver handle + * @mf_cfg: MF configuration + * + */ + static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) + { + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + DP(NETIF_MSG_LINK, + "Max BW configured to 0 - using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; + } + + #endif /* BNX2X_CMN_H */ diff --combined drivers/net/ethernet/broadcom/tg3.c index 0000000,b89027c..b865e9f mode 000000,100644..100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -1,0 -1,15951 +1,15951 @@@ + /* + * tg3.c: Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2005-2011 Broadcom Corporation. + * + * Firmware is: + * Derived from proprietary unpublished source code, + * Copyright (C) 2000-2003 Broadcom Corporation. + * + * Permission is hereby granted for the distribution of this firmware + * data in hexadecimal or equivalent format, provided this copyright + * notice is accompanying it. 
+ */ + + + #include <linux/module.h> + #include <linux/moduleparam.h> + #include <linux/stringify.h> + #include <linux/kernel.h> + #include <linux/types.h> + #include <linux/compiler.h> + #include <linux/slab.h> + #include <linux/delay.h> + #include <linux/in.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/ioport.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/skbuff.h> + #include <linux/ethtool.h> + #include <linux/mdio.h> + #include <linux/mii.h> + #include <linux/phy.h> + #include <linux/brcmphy.h> + #include <linux/if_vlan.h> + #include <linux/ip.h> + #include <linux/tcp.h> + #include <linux/workqueue.h> + #include <linux/prefetch.h> + #include <linux/dma-mapping.h> + #include <linux/firmware.h> + + #include <net/checksum.h> + #include <net/ip.h> + + #include <asm/system.h> + #include <linux/io.h> + #include <asm/byteorder.h> + #include <linux/uaccess.h> + + #ifdef CONFIG_SPARC + #include <asm/idprom.h> + #include <asm/prom.h> + #endif + + #define BAR_0 0 + #define BAR_2 2 + + #include "tg3.h" + + /* Functions & macros to verify TG3_FLAGS types */ + + static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) + { + return test_bit(flag, bits); + } + + static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) + { + set_bit(flag, bits); + } + + static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) + { + clear_bit(flag, bits); + } + + #define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) + #define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) + #define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + + #define DRV_MODULE_NAME "tg3" + #define TG3_MAJ_NUM 3 + #define TG3_MIN_NUM 120 + #define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) + #define DRV_MODULE_RELDATE "August 18, 2011" + + #define RESET_KIND_SHUTDOWN 0 + #define RESET_KIND_INIT 1 + #define RESET_KIND_SUSPEND 2 + + #define TG3_DEF_RX_MODE 0 + #define TG3_DEF_TX_MODE 0 + #define TG3_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + + #define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + + /* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ + + #define TG3_TX_TIMEOUT (5 * HZ) + + /* hardware minimum and maximum for a single frame's data payload */ + #define TG3_MIN_MTU 60 + #define TG3_MAX_MTU(tp) \ + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) + + /* These numbers seem to be hard coded in the NIC firmware somehow. + * You can't change the ring sizes, but you can change where you place + * them in the NIC onboard memory. + */ + #define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) + #define TG3_DEF_RX_RING_PENDING 200 + #define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) + #define TG3_DEF_RX_JUMBO_RING_PENDING 100 + #define TG3_RSS_INDIR_TBL_SIZE 128 + + /* Do not place this n-ring entries value into the tp struct itself, + * we really want to expose these constants to GCC so that modulo et + * al. operations are done with shifts and masks instead of with + * hw multiply/modulo instructions. 
Another solution would be to + * replace things like '% foo' with '& (foo - 1)'. + */ + + #define TG3_TX_RING_SIZE 512 + #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) + + #define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) + #define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) + #define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) + #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ + TG3_TX_RING_SIZE) + #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) + + #define TG3_DMA_BYTE_ENAB 64 + + #define TG3_RX_STD_DMA_SZ 1536 + #define TG3_RX_JMB_DMA_SZ 9046 + + #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + + #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) + #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + + #define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) + + #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) + + /* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. + */ + #define TG3_RX_COPY_THRESHOLD 256 + #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD + #else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) + #endif + + #if (NET_IP_ALIGN != 0) + #define TG3_RX_OFFSET(tp) ((tp)->rx_offset) + #else + #define TG3_RX_OFFSET(tp) 0 + #endif + + /* minimum number of free TX descriptors required to wake up TX process */ + #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) + #define TG3_TX_BD_DMA_MAX 4096 + + #define TG3_RAW_IP_ALIGN 2 + + #define TG3_FW_UPDATE_TIMEOUT_SEC 5 + + #define FIRMWARE_TG3 "tigon/tg3.bin" + #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" + #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" + + static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + + MODULE_AUTHOR("David S. 
Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); + MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_MODULE_VERSION); + MODULE_FIRMWARE(FIRMWARE_TG3); + MODULE_FIRMWARE(FIRMWARE_TG3TSO); + MODULE_FIRMWARE(FIRMWARE_TG3TSO5); + + static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ + module_param(tg3_debug, int, 0); + MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); + + static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ + {} + }; + + MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); + + static const struct { + const char string[ETH_GSTRING_LEN]; + } ethtool_stats_keys[] = { + { "rx_octets" }, + { "rx_fragments" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "rx_fcs_errors" }, + { "rx_align_errors" }, + { "rx_xon_pause_rcvd" }, + { "rx_xoff_pause_rcvd" }, + { "rx_mac_ctrl_rcvd" }, + { "rx_xoff_entered" }, + { "rx_frame_too_long_errors" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_in_length_errors" }, + { "rx_out_length_errors" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + { "rx_1523_to_2047_octet_packets" }, + { "rx_2048_to_4095_octet_packets" }, + { "rx_4096_to_8191_octet_packets" }, + { "rx_8192_to_9022_octet_packets" }, + + { "tx_octets" }, + { "tx_collisions" }, + + { "tx_xon_sent" }, + { "tx_xoff_sent" }, + { "tx_flow_control" }, + { 
"tx_mac_errors" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + { "tx_deferred" }, + { "tx_excessive_collisions" }, + { "tx_late_collisions" }, + { "tx_collide_2times" }, + { "tx_collide_3times" }, + { "tx_collide_4times" }, + { "tx_collide_5times" }, + { "tx_collide_6times" }, + { "tx_collide_7times" }, + { "tx_collide_8times" }, + { "tx_collide_9times" }, + { "tx_collide_10times" }, + { "tx_collide_11times" }, + { "tx_collide_12times" }, + { "tx_collide_13times" }, + { "tx_collide_14times" }, + { "tx_collide_15times" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_carrier_sense_errors" }, + { "tx_discards" }, + { "tx_errors" }, + + { "dma_writeq_full" }, + { "dma_write_prioq_full" }, + { "rxbds_empty" }, + { "rx_discards" }, + { "rx_errors" }, + { "rx_threshold_hit" }, + + { "dma_readq_full" }, + { "dma_read_prioq_full" }, + { "tx_comp_queue_full" }, + + { "ring_set_send_prod_index" }, + { "ring_status_update" }, + { "nic_irqs" }, + { "nic_avoided_irqs" }, + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, + }; + + #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + + static const struct { + const char string[ETH_GSTRING_LEN]; + } ethtool_test_keys[] = { + { "nvram test (online) " }, + { "link test (online) " }, + { "register test (offline)" }, + { "memory test (offline)" }, + { "mac loopback test (offline)" }, + { "phy loopback test (offline)" }, + { "ext loopback test (offline)" }, + { "interrupt test (offline)" }, + }; + + #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + + static void tg3_write32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off); + } + + static u32 tg3_read32(struct tg3 *tp, u32 off) + { + return readl(tp->regs + off); + } + + static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->aperegs + off); + } + + static u32 tg3_ape_read32(struct tg3 *tp, u32 off) + { + return readl(tp->aperegs + off); + } + + static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off); + readl(tp->regs + off); + } + + static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) + { + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; + } + + static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == TG3_RX_STD_PROD_IDX_REG) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl 
register. + */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); + } + } + + static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) + { + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; + } + + /* usec_wait specifies the wait time in usec when writing to certain registers + * where it is unsafe to read back the register without some delay. + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. + */ + static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) + { + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) + /* Non-posted methods */ + tp->write32(tp, off, val); + else { + /* Posted method */ + tg3_write32(tp, off, val); + if (usec_wait) + udelay(usec_wait); + tp->read32(tp, off); + } + /* Wait again after the read for the posted method to guarantee that + * the wait time is met. + */ + if (usec_wait) + udelay(usec_wait); + } + + static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) + { + tp->write32_mbox(tp, off, val); + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) + tp->read32_mbox(tp, off); + } + + static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) + { + void __iomem *mbox = tp->regs + off; + writel(val, mbox); + if (tg3_flag(tp, TXD_MBOX_HWBUG)) + writel(val, mbox); + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + readl(mbox); + } + + static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) + { + return readl(tp->regs + off + GRCMBOX_BASE); + } + + static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) + { + writel(val, tp->regs + off + GRCMBOX_BASE); + } + + #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) + #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) + #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) + #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) + #define tr32_mailbox(reg) tp->read32_mbox(tp, reg) + + #define tw32(reg, val) tp->write32(tp, reg, val) + #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) + #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) + #define tr32(reg) tp->read32(tp, reg) + + static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) + { + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + tw32_f(TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. 
*/ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) + { + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + *val = tr32(TG3PCI_MEM_WIN_DATA); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); + } + + static void tg3_ape_lock_init(struct tg3 *tp) + { + int i; + u32 regbase, bit; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) { + if (i == TG3_APE_LOCK_GPIO) + continue; + tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); + } + + /* Clear the correct bit of the GPIO lock too. */ + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); + } + + static int tg3_ape_lock(struct tg3 *tp, int locknum) + { + int i, off; + int ret = 0; + u32 status, req, gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return 0; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, gnt + off); + if (status == bit) + break; + udelay(10); + } + + if (status != bit) { + /* Revoke the lock request. 
*/ + tg3_ape_write32(tp, gnt + off, bit); + ret = -EBUSY; + } + + return ret; + } + + static void tg3_ape_unlock(struct tg3 *tp, int locknum) + { + u32 gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, gnt + 4 * locknum, bit); + } + + static void tg3_ape_send_event(struct tg3 *tp, u32 event) + { + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. */ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); + } + + static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) + { + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. 
+ */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); + } + + static void tg3_disable_ints(struct tg3 *tp) + { + int i; + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); + } + + static void tg3_enable_ints(struct tg3 *tp) + { + int i; + + tp->irq_sync = 0; + wmb(); + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); + + tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tg3_flag(tp, 1SHOT_MSI)) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + tp->coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!tg3_flag(tp, TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coal_now); + + tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); + } + + static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int work_exists = 0; + + /* check for phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + if (sblk->status & SD_STATUS_LINK_CHG) + work_exists = 1; + } + /* check for RX/TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_exists = 1; + + return work_exists; + } + + /* tg3_int_reenable + * similar to tg3_enable_ints, but it accurately determines whether there + * is new work pending and can return without flushing the PIO write + * which reenables interrupts + */ + static void tg3_int_reenable(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. 
+ */ + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | tnapi->coal_now); + } + + static void tg3_switch_clocks(struct tg3 *tp) + { + u32 clock_ctrl; + u32 orig_clock_ctrl; + + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) + return; + + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + + orig_clock_ctrl = clock_ctrl; + clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | + CLOCK_CTRL_CLKRUN_OENABLE | + 0x1f); + tp->pci_clock_ctrl = clock_ctrl; + + if (tg3_flag(tp, 5705_PLUS)) { + if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | CLOCK_CTRL_625_CORE, 40); + } + } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | + (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), + 40); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | (CLOCK_CTRL_ALTCLK), + 40); + } + tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); + } + + #define PHY_BUSY_LOOPS 5000 + + static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) + { + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + *val = 0x0; + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (MI_COM_CMD_READ | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) { + *val = frame_val & MI_COM_DATA_MASK; + ret = 0; + } + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; + } + + static int tg3_writephy(struct tg3 *tp, int reg, u32 val) + { + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) + return 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (val & MI_COM_DATA_MASK); + frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) + ret = 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; + } + + static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + + done: + return err; + } + + static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) + { + int err; + + err = tg3_writephy(tp, 
MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + + done: + return err; + } + + static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; + } + + static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; + } + + static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) + { + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; + } + + static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) + { + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); + } + + #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + MII_TG3_AUXCTL_ACTL_TX_6DB) + + #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + + static int tg3_bmcr_reset(struct tg3 *tp) + { + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. + */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; + } + + static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) + { + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (tg3_readphy(tp, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; + } + + static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) + { + struct tg3 *tp = bp->priv; + u32 ret = 0; + + spin_lock_bh(&tp->lock); + + if (tg3_writephy(tp, reg, val)) + ret = -EIO; + + spin_unlock_bh(&tp->lock); + + return ret; + } + + static int tg3_mdio_reset(struct mii_bus *bp) + { + return 0; + } + + static void tg3_mdio_config_5785(struct tg3 *tp) + { + u32 val; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } + + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } + + if 
(!tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); + } + + static void tg3_mdio_start(struct tg3 *tp) + { + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (tg3_flag(tp, MDIOBUS_INITED) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + } + + static int tg3_mdio_init(struct tg3 *tp) + { + int i; + u32 reg; + struct phy_device *phydev; + + if (tg3_flag(tp, 5717_PLUS)) { + u32 is_serdes; + + tp->phy_addr = tp->pci_fn + 1; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; + else + is_serdes = tr32(TG3_CPMU_PHY_STRAP) & + TG3_CPMU_PHY_STRAP_IS_SERDES; + if (is_serdes) + tp->phy_addr += 7; + } else + tp->phy_addr = TG3_PHY_MII_ADDR; + + tg3_mdio_start(tp); + + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) + return 0; + + tp->mdio_bus = mdiobus_alloc(); + if (tp->mdio_bus == NULL) + return -ENOMEM; + + tp->mdio_bus->name = "tg3 mdio bus"; + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (tp->pdev->bus->number << 8) | tp->pdev->devfn); + tp->mdio_bus->priv = tp; + tp->mdio_bus->parent = &tp->pdev->dev; + tp->mdio_bus->read = &tg3_mdio_read; + tp->mdio_bus->write = &tg3_mdio_write; + tp->mdio_bus->reset = &tg3_mdio_reset; + tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->irq = &tp->mdio_irq[0]; + + for (i = 0; i < PHY_MAX_ADDR; i++) + tp->mdio_bus->irq[i] = PHY_POLL; + + /* The bus registration will look for all the PHYs on the mdio bus. + * Unfortunately, it does not ensure the PHY is powered up before + * accessing the PHY ID registers. A chip reset is the + * quickest way to bring the device back to an operational state.. 
+ */ + if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) + tg3_bmcr_reset(tp); + + i = mdiobus_register(tp->mdio_bus); + if (i) { + dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(tp->mdio_bus); + return i; + } + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (!phydev || !phydev->drv) { + dev_warn(&tp->pdev->dev, "No PHY devices\n"); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + return -ENODEV; + } + + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM57780: + phydev->interface = PHY_INTERFACE_MODE_GMII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + break; + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | + PHY_BRCM_RX_REFCLK_UNUSED | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_AUTO_PWRDWN_ENABLE; + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; + /* fallthru */ + case PHY_ID_RTL8211C: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_ID_RTL8201E: + case PHY_ID_BCMAC131: + phydev->interface = PHY_INTERFACE_MODE_MII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + tp->phy_flags |= TG3_PHYFLG_IS_FET; + break; + } + + tg3_flag_set(tp, MDIOBUS_INITED); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + + return 0; + } + + static void tg3_mdio_fini(struct tg3 *tp) + { + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + } + } + + /* tp->lock is held. */ + static inline void tg3_generate_fw_event(struct tg3 *tp) + { + u32 val; + + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); + + tp->last_event_jiffies = jiffies; + } + + #define TG3_FW_EVENT_TIMEOUT_USEC 2500 + + /* tp->lock is held. */ + static void tg3_wait_for_event_ack(struct tg3 *tp) + { + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. */ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(8); + } + } + + /* tp->lock is held.
*/ + static void tg3_ump_link_report(struct tg3 *tp) + { + u32 reg; + u32 val; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + tg3_generate_fw_event(tp); + } + + /* tp->lock is held. */ + static void tg3_stop_fw(struct tg3 *tp) + { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } + } + + /* tp->lock is held. */ + static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) + { + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); + } + + /* tp->lock is held. */ + static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) + { + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); + } + + /* tp->lock is held. */ + static void tg3_write_sig_legacy(struct tg3 *tp, int kind) + { + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + } + + static int tg3_poll_fw(struct tg3 *tp) + { + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete.
*/ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; + } + + static void tg3_link_report(struct tg3 *tp) + { + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? "enabled" : "disabled"); + + tg3_ump_link_report(tp); + } + } + + static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) + { + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; + } + + static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) + { + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; + } + + static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) + { + u8 cap = 0; + + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; + } + + static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) + { + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tg3_flag(tp, USE_PHYLIB)) + autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + 
else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); + } + + static void tg3_adjust_link(struct net_device *dev) + { + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = tg3_advert_flowctrl_1000T( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + tp->mac_mode = mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || + (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); + } + + static int tg3_phy_init(struct tg3 *tp) + { + struct phy_device *phydev; + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, + phydev->dev_flags, phydev->interface); + if (IS_ERR(phydev)) { + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. 
*/ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + return -EINVAL; + } + + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; + } + + static void tg3_phy_start(struct tg3 *tp) + { + struct phy_device *phydev; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + phydev->speed = tp->link_config.orig_speed; + phydev->duplex = tp->link_config.orig_duplex; + phydev->autoneg = tp->link_config.orig_autoneg; + phydev->advertising = tp->link_config.orig_advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); + } + + static void tg3_phy_stop(struct tg3 *tp) + { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } + + static void tg3_phy_fini(struct tg3 *tp) + { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; + } + } + + static int tg3_phy_set_extloopbk(struct tg3 *tp) + { + int err; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + return 0; + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + MII_TG3_AUXCTL_ACTL_EXTLOOPBK | + 0x4c20); + goto done; + } + + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (err) + return err; + + val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); + + done: + return err; + } + + static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) + { + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + } + + static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) + { + u32 reg; + + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_SCR5_SEL | + MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_APD_SEL | + MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + } + + static void tg3_phy_toggle_automdix(struct tg3 *tp, int 
enable) + { + u32 phy; + + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); + } + } + } + + static void tg3_phy_set_wirespeed(struct tg3 *tp) + { + int ret; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) + return; + + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); + } + + static void tg3_phy_apply_otp(struct tg3 *tp) + { + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) + { + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, MDIO_MMD_AN, + TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up == 1 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } + } + + static void tg3_phy_eee_enable(struct tg3 *tp) + { + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == 
ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); + } + + static int tg3_wait_macro_done(struct tg3 *tp) + { + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; + } + + static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) + { + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; + } + + static int tg3_phy_reset_chanpat(struct tg3 *tp) + { + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; + } + + static int tg3_phy_reset_5703_4_5(struct tg3 *tp) + { + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; + do_phy_reset = 1; + do { + if (do_phy_reset) { + err = tg3_bmcr_reset(tp); + if (err) + return err; + do_phy_reset = 0; + } + + /* Disable transmitter and interrupt. */ + if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) + continue; + + reg32 |= 0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + /* Set full-duplex, 1000 mbps. */ + tg3_writephy(tp, MII_BMCR, + BMCR_FULLDPLX | BMCR_SPEED1000); + + /* Set to master mode. */ + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) + continue; + + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err; + + /* Block the PHY control access.
*/ + tg3_phydsp_write(tp, 0x8005, 0x0800); + + err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); + if (!err) + break; + } while (--retries); + + err = tg3_phy_reset_chanpat(tp); + if (err) + return err; + + tg3_phydsp_write(tp, 0x8005, 0x0000); + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { + reg32 &= ~0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + } else if (!err) + err = -EBUSY; + + return err; + } + + /* This will reset the tigon3 PHY if there is no valid + * link unless the FORCE argument is non-zero. + */ + static int tg3_phy_reset(struct tg3 *tp) + { + u32 val, cpmuctrl; + int err; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + } + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); + if (err != 0) + return -EBUSY; + + if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + err = tg3_phy_reset_5703_4_5(tp); + if (err) + return err; + goto out; + } + + cpmuctrl = 0; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + cpmuctrl = tr32(TG3_CPMU_CTRL); + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); + } + + err = tg3_bmcr_reset(tp); + if (err) + return err; + + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + tw32(TG3_CPMU_CTRL, cpmuctrl); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == + CPMU_LSPD_1000MB_MACCLK_12_5) { + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + udelay(40); + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + } + + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; + + tg3_phy_apply_otp(tp); + + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else + tg3_phy_toggle_apd(tp, false); + + out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + +
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } + + /* Set Extended packet length bit (bit 14) on all chips that */ + /* support jumbo frames */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { + /* Set bit 14 with read-modify-write to preserve other bits */ + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); + } + + /* Set phy register 0x10 bit 0 to high fifo elasticity to support + * jumbo frames transmission. + */ + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); + } + + tg3_phy_toggle_automdix(tp, 1); + tg3_phy_set_wirespeed(tp); + return 0; + } + + #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 + #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 + #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) + #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + + #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + + static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) + { + u32 status, shift; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); + + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; + } + + static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) + { + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + return 0; + } + + static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) + { + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + 
TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) + { + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; + + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. */ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + } + } + + static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) + { + u32 msg = 0; + + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; + + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + + done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } + + static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) + { + bool need_vaux = false; + + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. 
*/ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; + } + } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + } + + static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) + { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; + } + + static int tg3_setup_phy(struct tg3 *, int); + static int tg3_halt_cpu(struct tg3 *, u32); + + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) + { + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); + u32 serdes_cfg = tr32(MAC_SERDES_CFG); + + sg_dig_ctrl |= + SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; + tw32(SG_DIG_CTRL, sg_dig_ctrl); + tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); + } + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_bmcr_reset(tp); + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + return; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); + } + + /* The PHY should not be powered down on some chips because + * of bugs. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); + } + + /* tp->lock is held. */ + static int tg3_nvram_lock(struct tg3 *tp) + { + if (tg3_flag(tp, NVRAM)) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; + } + + /* tp->lock is held. 
*/ + static void tg3_nvram_unlock(struct tg3 *tp) + { + if (tg3_flag(tp, NVRAM)) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } + } + + /* tp->lock is held. */ + static void tg3_enable_nvram_access(struct tg3 *tp) + { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } + } + + /* tp->lock is held. */ + static void tg3_disable_nvram_access(struct tg3 *tp) + { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } + } + + static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) + { + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; + } + + #define NVRAM_CMD_TIMEOUT 10000 + + static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) + { + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + udelay(10); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; + } + + static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) + { + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; + } + + static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) + { + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; + } + + /* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. 
+ */ + static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) + { + int ret; + + if (!tg3_flag(tp, NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; + } + + /* Ensures NVRAM data is in bytestream format. */ + static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) + { + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; + } + + #define RX_CPU_SCRATCH_BASE 0x30000 + #define RX_CPU_SCRATCH_SIZE 0x04000 + #define TX_CPU_SCRATCH_BASE 0x34000 + #define TX_CPU_SCRATCH_SIZE 0x04000 + + /* tp->lock is held. */ + static int tg3_halt_cpu(struct tg3 *tp, u32 offset) + { + int i; + + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } + + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; + } + + struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; + }; + + /* tp->lock is held. */ + static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) + { + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); + + err = 0; + + out: + return err; + } + + /* tp->lock is held. 
*/ + static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) + { + struct fw_info info; + const __be32 *fw_data; + int err, i; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. */ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; + } + + /* tp->lock is held. */ + static int tg3_load_tso_firmware(struct tg3 *tp) + { + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; + } + + + /* tp->lock is held. 
*/ + static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) + { + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); + } + + static void tg3_enable_register_access(struct tg3 *tp) + { + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); + } + + static int tg3_power_up(struct tg3 *tp) + { + int err; + + tg3_enable_register_access(tp); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; + } + + static int tg3_power_down_prepare(struct tg3 *tp) + { + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); + } + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } + + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + 
tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + tp->link_config.speed = SPEED_10; + tp->link_config.duplex = DUPLEX_HALF; + tp->link_config.autoneg = AUTONEG_ENABLE; + tg3_setup_phy(tp, 0); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; + + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } + } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); + + if (device_should_wake) { + u32 mac_mode; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? + SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } + + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); + + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } + + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; + + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; + + if 
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } + } + + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); + + tg3_frob_aux_power(tp, true); + + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); + + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!tg3_flag(tp, ENABLE_ASF)) { + int err; + + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } + } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + return 0; + } + + static void tg3_power_down(struct tg3 *tp) + { + tg3_power_down_prepare(tp); + + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); + } + + static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) + { + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; + + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : + DUPLEX_HALF; + break; + } + *speed = SPEED_INVALID; + *duplex = DUPLEX_INVALID; + break; + } + } + + static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) + { + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; + + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; + + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000FULL; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... */ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } + + done: + return err; + } + + static void tg3_phy_copper_begin(struct tg3 *tp) + { + u32 new_adv; + int i; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_INVALID) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + } else { + /* Asking for a specific link mode. 
*/ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; + } + + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); + } + + if (tp->link_config.autoneg == AUTONEG_DISABLE && + tp->link_config.speed != SPEED_INVALID) { + u32 bmcr, orig_bmcr; + + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; + + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; + + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } + + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; + + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } else { + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + + static int tg3_init_5401phy_dsp(struct tg3 *tp) + { + int err; + + /* Turn off tap power management. */ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + + udelay(40); + + return err; + } + + static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) + { + u32 adv_reg, all_mask = 0; + + if (mask & ADVERTISED_10baseT_Half) + all_mask |= ADVERTISE_10HALF; + if (mask & ADVERTISED_10baseT_Full) + all_mask |= ADVERTISE_10FULL; + if (mask & ADVERTISED_100baseT_Half) + all_mask |= ADVERTISE_100HALF; + if (mask & ADVERTISED_100baseT_Full) + all_mask |= ADVERTISE_100FULL; + + if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) + return 0; + + if ((adv_reg & ADVERTISE_ALL) != all_mask) + return 0; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + all_mask = 0; + if (mask & ADVERTISED_1000baseT_Half) + all_mask |= ADVERTISE_1000HALF; + if (mask & ADVERTISED_1000baseT_Full) + all_mask |= ADVERTISE_1000FULL; + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return 0; + + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (tg3_ctrl != all_mask) + return 0; + } + + return 1; + } + + static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) + { + u32 curadv, reqadv; + + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return 1; + + curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + + if (tp->link_config.active_duplex == DUPLEX_FULL) { + if (curadv != reqadv) + return 0; + + if (tg3_flag(tp, PAUSE_AUTONEG)) + tg3_readphy(tp, MII_LPA, rmtadv); + } else { + /* Reprogram the advertisement register, even if it + * does not affect the current link. 
If the link + * gets renegotiated in the future, we can save an + * additional renegotiation cycle by advertising + * it correctly in the first place. + */ + if (curadv != reqadv) { + *lcladv &= ~(ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + } + } + + return 1; + } + + static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) + { + int current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + netif_carrier_ok(tp->dev)) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = 1; + } + if (force_reset) + tg3_phy_reset(tp); + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; + + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } + + /* Clear pending interrupts... 
*/ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) + tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_writephy(tp, MII_TG3_IMASK, ~0); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + else + tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); + } + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); + goto relink; + } + } + + bmsr = 0; + for (i = 0; i < 100; i++) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + break; + udelay(40); + } + + if (bmsr & BMSR_LSTATUS) { + u32 aux_stat, bmcr; + + tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); + for (i = 0; i < 2000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && + aux_stat) + break; + } + + tg3_aux_stat_to_speed_duplex(tp, aux_stat, + &current_speed, + &current_duplex); + + bmcr = 0; + for (i = 0; i < 200; i++) { + tg3_readphy(tp, MII_BMCR, &bmcr); + if (tg3_readphy(tp, MII_BMCR, &bmcr)) + continue; + if (bmcr && bmcr != 0x7fff) + break; + udelay(10); + } + + lcl_adv = 0; + rmt_adv = 0; + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if ((bmcr & BMCR_ANENABLE) && + tg3_copper_is_advertising_all(tp, + tp->link_config.advertising)) { + if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, + &rmt_adv)) + current_link_up = 1; + } + } else { + if (!(bmcr & BMCR_ANENABLE) && + tp->link_config.speed == current_speed && + tp->link_config.duplex == current_duplex && + tp->link_config.flowctrl == + tp->link_config.active_flowctrl) { + current_link_up = 1; + } + } + + if (current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + + relink: + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); + + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = 1; + } + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up == 1) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets... 
+ */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_phy_eee_adjust(tp, current_link_up); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. */ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + current_link_up == 1 && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + } + + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 oldlnkctl, newlnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + return 0; + } + + struct tg3_fiber_aneginfo { + int state; + #define ANEG_STATE_UNKNOWN 0 + #define ANEG_STATE_AN_ENABLE 1 + #define ANEG_STATE_RESTART_INIT 2 + #define ANEG_STATE_RESTART 3 + #define ANEG_STATE_DISABLE_LINK_OK 4 + #define ANEG_STATE_ABILITY_DETECT_INIT 5 + #define ANEG_STATE_ABILITY_DETECT 6 + #define ANEG_STATE_ACK_DETECT_INIT 7 + #define ANEG_STATE_ACK_DETECT 8 + #define ANEG_STATE_COMPLETE_ACK_INIT 9 + #define ANEG_STATE_COMPLETE_ACK 10 + #define ANEG_STATE_IDLE_DETECT_INIT 11 + #define ANEG_STATE_IDLE_DETECT 12 + #define ANEG_STATE_LINK_OK 13 + #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 + #define ANEG_STATE_NEXT_PAGE_WAIT 15 + + u32 flags; + #define MR_AN_ENABLE 0x00000001 + #define MR_RESTART_AN 0x00000002 + #define MR_AN_COMPLETE 0x00000004 + #define MR_PAGE_RX 0x00000008 + #define MR_NP_LOADED 0x00000010 + #define MR_TOGGLE_TX 0x00000020 + #define MR_LP_ADV_FULL_DUPLEX 0x00000040 + #define MR_LP_ADV_HALF_DUPLEX 0x00000080 + #define MR_LP_ADV_SYM_PAUSE 0x00000100 + #define MR_LP_ADV_ASYM_PAUSE 0x00000200 + #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 + #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 + #define MR_LP_ADV_NEXT_PAGE 0x00001000 + #define MR_TOGGLE_RX 0x00002000 + #define MR_NP_RX 0x00004000 + + #define MR_LINK_OK 0x80000000 + + unsigned long link_time, cur_time; + + u32 ability_match_cfg; + int ability_match_count; + + char ability_match, idle_match, ack_match; + + u32 txconfig, rxconfig; + #define ANEG_CFG_NP 0x00000080 + #define ANEG_CFG_ACK 0x00000040 + #define ANEG_CFG_RF2 0x00000020 + #define ANEG_CFG_RF1 0x00000010 + #define ANEG_CFG_PS2 0x00000001 + #define ANEG_CFG_PS1 0x00008000 + #define ANEG_CFG_HD 0x00004000 + #define ANEG_CFG_FD 0x00002000 + #define ANEG_CFG_INVAL 0x00001f06 + + }; + #define ANEG_OK 0 + #define ANEG_DONE 1 + #define ANEG_TIMER_ENAB 2 + #define ANEG_FAILED -1 + + #define ANEG_STATE_SETTLE_TIME 10000 + + static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) + { + u16 
flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + int ret; + + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; + + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } + } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 0; + + ap->idle_match = 0; + } else { + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; + } + + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; + + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; + + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; + + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; + + /* fallthru */ + case ANEG_STATE_RESTART: + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; + + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ABILITY_DETECT; + break; + + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ACK_DETECT; + + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + } + break; + + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + 
MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; + + ap->link_time = ap->cur_time; + + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; + + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; + + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; + + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; + + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; + } + + return ret; + } + + static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) + { + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); + + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; + + udelay(1); + } + + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; + + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; + + return res; + } + + static void tg3_init_bcm8002(struct tg3 *tp) + { + u32 mac_status = tr32(MAC_STATUS); + int i; + + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; + + /* Set PLL lock range. 
*/ + tg3_writephy(tp, 0x16, 0x8007); + + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); + + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); + + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); + + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); + + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); + + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); + + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); + + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 15000; i++) + udelay(10); + + /* Deselect the channel register so we can read the PHYID + * later. + */ + tg3_writephy(tp, 0x10, 0x8011); + } + + static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) + { + u16 flowctrl; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + int current_link_up; + + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = 0; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; + + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } + + sg_dig_ctrl = tr32(SG_DIG_CTRL); + + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + } + goto out; + } + + /* Want auto-negotiation. 
*/ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; + + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } + restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); + + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; + + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = 1; + tp->serdes_counter = 0; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } + } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + + out: + return current_link_up; + } + + static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) + { + int current_link_up = 0; + + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; + + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; + + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + + current_link_up = 1; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } + + mac_status = 
tr32(MAC_STATUS); + if (current_link_up == 0 && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = 1; + } else { + tg3_setup_flow_control(tp, 0, 0); + + /* Forcing 1000FD link up. */ + current_link_up = 1; + + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + out: + return current_link_up; + } + + static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) + { + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + int current_link_up; + int i; + + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + netif_carrier_ok(tp->dev) && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } + } + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); + + /* Enable link change event even when serdes polling. */ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); + + current_link_up = 0; + mac_status = tr32(MAC_STATUS); + + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = 0; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } + + if (current_link_up == 1) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } else { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } + + return 0; + } + + static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) + { + int current_link_up, err = 0; + u32 bmsr, bmcr; + u16 current_speed; + u8 current_duplex; + u32 local_adv, remote_adv; + + tp->mac_mode |= 
MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if (force_reset) + tg3_phy_reset(tp); + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, new_adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + + if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000XHALF; + if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, new_adv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; + } + } else { + u32 new_bmcr; + + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; + + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. 
+ */ + new_bmcr |= BMCR_SPEED1000; + + /* Force a linkdown */ + if (netif_carrier_ok(tp->dev)) { + u32 adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + netif_carrier_off(tp->dev); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + } + + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = 1; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + local_adv = 0; + remote_adv = 0; + + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = 0; + } + } + } + + if (current_link_up == 1 && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else { + netif_carrier_off(tp->dev); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + tg3_link_report(tp); + } + return err; + } + + static void tg3_serdes_parallel_detect(struct tg3 *tp) + { + if (tp->serdes_counter) { + /* Give autoneg time to complete. */ + tp->serdes_counter--; + return; + } + + if (!netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; + + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; + + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. 
+ */ + + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + /* Config code words received, turn on autoneg. */ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + } + } + } + + static int tg3_setup_phy(struct tg3 *tp, int force_reset) + { + u32 val; + int err; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + u32 scale; + + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + } + + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + + if (!tg3_flag(tp, 5705_PLUS)) { + if (netif_carrier_ok(tp->dev)) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } + + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!netif_carrier_ok(tp->dev)) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } + + return err; + } + + static inline int tg3_irq_sync(struct tg3 *tp) + { + return tp->irq_sync; + } + + static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) + { + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); + } + + static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) + { + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, 
RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); + } + + static void tg3_dump_state(struct tg3 *tp) + { + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } + } + + /* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. + */ + static void tg3_tx_recover(struct tg3 *tp) + { + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); + + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. 
" + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); + + spin_lock(&tp->lock); + tg3_flag_set(tp, TX_RECOVERY_PENDING); + spin_unlock(&tp->lock); + } + + static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) + { + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); + } + + /* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. + */ + static void tg3_tx(struct tg3_napi *tnapi) + { + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + + if (tg3_flag(tp, ENABLE_TSS)) + index--; + + txq = netdev_get_tx_queue(tp->dev, index); + + while (sw_idx != hw_idx) { + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct sk_buff *skb = ri->skb; + int i, tx_bug = 0; + + if (unlikely(skb == NULL)) { + tg3_tx_recover(tp); + return; + } + + pci_unmap_single(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + ri->skb = NULL; + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + ri = &tnapi->tx_buffers[sw_idx]; + if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) + tx_bug = 1; + + pci_unmap_page(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + } + + dev_kfree_skb(skb); + + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); + return; + } + } + + tnapi->tx_cons = sw_idx; + + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + } + + static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) + { + if (!ri->skb) + return; + + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; + } + + /* Returns size of skb allocated or < 0 on error. + * + * We only need to fill in the address because the other members + * of the RX descriptor are invariant, see tg3_init_rings. + * + * Note the purposeful assymetry of cpu vs. chip accesses. For + * posting buffers we only dirty the first cache line of the RX + * descriptor (containing the address). Whereas for the RX status + * buffers the cpu only reads the last cacheline of the RX descriptor + * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
+ */ + static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) + { + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + struct sk_buff *skb; + dma_addr_t mapping; + int skb_size, dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + skb_size = tp->rx_pkt_map_sz; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; + break; + + default: + return -EINVAL; + } + + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. + */ + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); + if (skb == NULL) + return -ENOMEM; + + skb_reserve(skb, TG3_RX_OFFSET(tp)); + + mapping = pci_map_single(tp->pdev, skb->data, skb_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } + + map->skb = skb; + dma_unmap_addr_set(map, mapping, mapping); + + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); + + return skb_size; + } + + /* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_skb for full details. + */ + static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) + { + struct tg3 *tp = tnapi->tp; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; + + default: + return; + } + + dest_map->skb = src_map->skb; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; + + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. + */ + smp_wmb(); + + src_map->skb = NULL; + } + + /* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. 
When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ + static int tg3_rx(struct tg3_napi *tnapi, int budget) + { + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; + + work_mask |= opaque_key; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } + + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; + + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; + + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + *post_ptr); + if (skb_size < 0) + goto drop_it; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); + + /* Ensure that the update to the skb happens + * after the usage of the old DMA mapping. + */ + smp_wmb(); + + ri->skb = NULL; + + skb_put(skb, len); + } else { + struct sk_buff *copy_skb; + + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + + copy_skb = netdev_alloc_skb(tp->dev, len + + TG3_RAW_IP_ALIGN); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_put(copy_skb, len); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. 
*/ + skb = copy_skb; + } + + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } + + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); + + received++; + budget--; + + next_pkt: + (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } + next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; + + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } + + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); + + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. + */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; + + if (tnapi != &tp->napi[1]) + napi_schedule(&tp->napi[1].napi); + } + + return received; + } + + static void tg3_poll_link(struct tg3 *tp) + { + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); + } + } + } + + static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) + { + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. 
+ */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; + } + + return err; + } + + static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) + { + struct tg3 *tp = tnapi->tp; + + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; + } + + /* run RX thread, within the bounds set by NAPI. 
+ * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() + */ + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + + for (i = 1; i < tp->irq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); + + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); + + mmiowb(); + + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); + } + + return work_done; + } + + static int tg3_poll_msix(struct napi_struct *napi, int budget) + { + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } + } + + return work_done; + + tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; + } + + static void tg3_process_error(struct tg3 *tp) + { + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + schedule_work(&tp->reset_task); + } + + static int tg3_poll(struct napi_struct *napi, int budget) + { + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. 
+ */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); + break; + } + } + + return work_done; + + tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; + } + + static void tg3_napi_disable(struct tg3 *tp) + { + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); + } + + static void tg3_napi_enable(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); + } + + static void tg3_napi_init(struct tg3 *tp) + { + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); + } + + static void tg3_napi_fini(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); + } + + static inline void tg3_netif_stop(struct tg3 *tp) + { + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); + } + + static inline void tg3_netif_start(struct tg3 *tp) + { + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); + } + + static void tg3_irq_quiesce(struct tg3 *tp) + { + int i; + + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + } + + /* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ + static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) + { + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); + } + + static inline void tg3_full_unlock(struct tg3 *tp) + { + spin_unlock_bh(&tp->lock); + } + + /* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ + static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_HANDLED; + } + + /* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. + */ + static irqreturn_t tg3_msi(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. 
+ */ + tw32_mailbox(tnapi->int_mbox, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_RETVAL(1); + } + + static irqreturn_t tg3_interrupt(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } + out: + return IRQ_RETVAL(handled); + } + + static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
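tg3_interrupt_tagged() above uses an unchanged status_tag as its first hint that a shared interrupt was not ours: only when the tag has not advanced does it go on to check the PCI state register and possibly report the IRQ as unhandled. The tag comparison in isolation, with made-up names rather than the driver's fields:

#include <stdio.h>

static unsigned int last_irq_tag;

/* Returns 1 when the tag advanced since the last interrupt, i.e. the
 * hardware really posted new work; 0 for a stale, likely shared, IRQ. */
static int tag_advanced(unsigned int status_tag)
{
        if (status_tag == last_irq_tag)
                return 0;
        last_irq_tag = status_tag;
        return 1;
}

int main(void)
{
        printf("%d\n", tag_advanced(1));        /* 1: new work          */
        printf("%d\n", tag_advanced(1));        /* 0: tag did not move  */
        printf("%d\n", tag_advanced(2));        /* 1: new work again    */
        return 0;
}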
+ */ + tnapi->last_irq_tag = sblk->status_tag; + + if (tg3_irq_sync(tp)) + goto out; + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + + out: + return IRQ_RETVAL(handled); + } + + /* ISR for interrupt test */ + static irqreturn_t tg3_test_isr(int irq, void *dev_id) + { + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); + } + return IRQ_RETVAL(0); + } + + static int tg3_init_hw(struct tg3 *, int); + static int tg3_halt(struct tg3 *, int, int); + + /* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. + */ + static int tg3_restart_hw(struct tg3 *tp, int reset_phy) + __releases(tp->lock) + __acquires(tp->lock) + { + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + static void tg3_poll_controller(struct net_device *dev) + { + int i; + struct tg3 *tp = netdev_priv(dev); + + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); + } + #endif + + static void tg3_reset_task(struct work_struct *work) + { + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + unsigned int restart_timer; + + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_full_unlock(tp); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + restart_timer = tg3_flag(tp, RESTART_TIMER); + tg3_flag_clear(tp, RESTART_TIMER); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, 1); + if (err) + goto out; + + tg3_netif_start(tp); + + if (restart_timer) + mod_timer(&tp->timer, jiffies + 1); + + out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + } + + static void tg3_tx_timeout(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); + } + + schedule_work(&tp->reset_task); + } + + /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ + static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) + { + u32 base = (u32) mapping & 0xffffffff; + + return (base > 0xffffdcc0) && (base + len + 8 < base); + } + + /* Test for DMA addresses > 40-bit */ + static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) + { + #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; + #else + return 0; + #endif + } + + static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) + { + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss 
<< TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); + } + + static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) + { + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = 1; + + if (tg3_4g_overflow_test(map, len)) + hwbug = 1; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = 1; + + if (tg3_flag(tp, 4K_FIFO_LIMIT)) { + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > TG3_TX_BD_DMA_MAX) { + u32 frag_len = TG3_TX_BD_DMA_MAX; + len -= TG3_TX_BD_DMA_MAX; + + if (len) { + tnapi->tx_buffers[*entry].fragmented = true; + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += TG3_TX_BD_DMA_MAX / 2; + frag_len = TG3_TX_BD_DMA_MAX / 2; + } + } else + tmp_flag = flags; + + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + break; + } + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; + } + + static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) + { + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i < last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } + } + + /* Workaround 4GB and 40-bit hardware DMA bugs. */ + static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, ++ struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) + { + struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb; ++ struct sk_buff *new_skb, *skb = *pskb; + dma_addr_t new_addr = 0; + int ret = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); + + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } + + if (!new_skb) { + ret = -1; + } else { + /* New SKB is guaranteed to be linear. 
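tg3_4g_overflow_test() above relies on 32-bit unsigned wrap-around: if base + len + 8 wraps below base, the buffer (plus eight bytes of slack) straddles a 4 GB boundary, which the affected chips cannot DMA across. A tiny self-contained version with example values; the constants mirror the test above, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint32_t base, uint32_t len)
{
        /* The first comparison is a cheap filter: only addresses within
         * a few kilobytes of a 4 GB boundary are candidates at all. */
        return base > 0xffffdcc0u && (uint32_t)(base + len + 8) < base;
}

int main(void)
{
        printf("%d\n", crosses_4g(0xfffff000u, 0x2000));        /* 1: crosses */
        printf("%d\n", crosses_4g(0x10000000u, 0x2000));        /* 0: safe    */
        return 0;
}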
*/ + new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + dev_kfree_skb(new_skb); + ret = -1; + } else { + base_flags |= TXD_FLAG_END; + + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); + + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, *entry, 0); + dev_kfree_skb(new_skb); + ret = -1; + } + } + } + + dev_kfree_skb(skb); - ++ *pskb = new_skb; + return ret; + } + + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); + + /* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. + */ + static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) + { + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { + netif_stop_queue(tp->dev); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) + return NETDEV_TX_BUSY; + + netif_wake_queue(tp->dev); + } + + segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto tg3_tso_bug_end; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit(nskb, tp->dev); + } while (segs); + + tg3_tso_bug_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; + } + + /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. + */ + static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int last; + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + budget = tg3_tx_avail(tnapi); + + /* We are running in BH disabled context with netif_tx_lock + * and TX reclaim runs via tp->napi.poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! + */ + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = tnapi->tx_prod; + base_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + struct iphdr *iph; + u32 tcp_opt_len, hdr_len; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); + + if (skb_is_gso_v6(skb)) { + hdr_len = skb_headlen(skb) - ETH_HLEN; + } else { + u32 ip_tcp_len; + + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, skb); + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcp_hdr(skb)->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } + } else { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } + } + } + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); + } + + len = skb_headlen(skb); + + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + + would_hit_hwbug = 0; + + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) + would_hit_hwbug = 1; + + /* Now loop through additional data fragments, and queue them. */ + if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; + + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; + + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); + + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (dma_mapping_error(&tp->pdev->dev, mapping)) + goto dma_error; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? TXD_FLAG_END : 0), + tmp_mss, vlan)) + would_hit_hwbug = 1; + } + } + + if (would_hit_hwbug) { + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + + /* If the workaround fails due to memory/mapping + * failure, silently drop this packet. 
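The TSO path above only detours through tg3_tso_bug() when the Ethernet, IP and TCP headers together exceed 80 bytes, which for IPv4/TCP takes sizeable IP or TCP options. A rough arithmetic sketch with fixed example sizes; nothing here is read from a real skb:

#include <stdio.h>

int main(void)
{
        const int eth_hlen = 14;        /* Ethernet header, untagged    */
        const int ip_hlen  = 20;        /* IPv4 header without options  */
        const int tcp_hlen = 20;        /* base TCP header              */
        int tcp_opt_len;

        for (tcp_opt_len = 0; tcp_opt_len <= 40; tcp_opt_len += 20) {
                int hdr_len = ip_hlen + tcp_hlen + tcp_opt_len;

                printf("tcp opts %2d -> headers %2d -> workaround %s\n",
                       tcp_opt_len, eth_hlen + hdr_len,
                       (eth_hlen + hdr_len) > 80 ? "yes" : "no");
        }
        return 0;
}

With no options the headers total 54 bytes, well under the limit; only option-heavy headers push the sum past 80 and trigger the GSO fallback.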
+ */ + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); - if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget, ++ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto out_unlock; + } + + skb_tx_timestamp(skb); + + /* Packets are ready, update Tx producer idx local and on card. */ + tw32_tx_mbox(tnapi->prodmbox, entry); + + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); + } + + out_unlock: + mmiowb(); + + return NETDEV_TX_OK; + + dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + dev_kfree_skb(skb); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; + return NETDEV_TX_OK; + } + + static void tg3_mac_loopback(struct tg3 *tp, bool enable) + { + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); + + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + tw32(MAC_MODE, tp->mac_mode); + udelay(40); + } + + static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) + { + u32 val, bmcr, mac_mode, ptest = 0; + + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, 0); + + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; + + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; + bmcr |= BMCR_SPEED1000; + } + } + + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; + + tg3_writephy(tp, MII_BMCR, bmcr); + + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); + + udelay(40); + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= 
MAC_MODE_PORT_MODE_MII; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; + } + + static void tg3_set_loopback(struct net_device *dev, u32 features) + { + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, 1); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } + } + + static u32 tg3_fix_features(struct net_device *dev, u32 features) + { + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; + } + + static int tg3_set_features(struct net_device *dev, u32 features) + { + u32 changed = dev->features ^ features; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; + } + + static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, + int new_mtu) + { + dev->mtu = new_mtu; + + if (new_mtu > ETH_DATA_LEN) { + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); + } else { + tg3_flag_set(tp, JUMBO_RING_ENABLE); + } + } else { + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); + } + tg3_flag_clear(tp, JUMBO_RING_ENABLE); + } + } + + static int tg3_change_mtu(struct net_device *dev, int new_mtu) + { + struct tg3 *tp = netdev_priv(dev); + int err; + + if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. 
+ */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; + } + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + tg3_set_mtu(dev, tp, new_mtu); + + err = tg3_restart_hw(tp, 0); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; + } + + static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + /* Initialize rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ + static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. 
Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + for (i = 0; i < tp->rx_jumbo_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); + if (i == 0) + goto initfail; + tp->rx_jumbo_pending = i; + break; + } + } + + done: + return 0; + + initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; + } + + static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; + } + if (tpr->rx_jmb) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } + } + + static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) + { + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); + if (!tpr->rx_std) + goto err_out; + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); + if (!tpr->rx_jmb) + goto err_out; + } + + return 0; + + err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; + } + + /* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ + static void tg3_free_rings(struct tg3 *tp) + { + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; + + if (!skb) + continue; + + tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } + } + } + + /* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ + static int tg3_init_rings(struct tg3 *tp) + { + int i; + + /* Free up all the SKBs. 
*/ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + if (tnapi->tx_ring) + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + tg3_free_rings(tp); + return -ENOMEM; + } + } + + return 0; + } + + /* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ + static void tg3_free_consistent(struct tg3 *tp) + { + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + + if (tnapi->rx_rcb) { + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } + + tg3_rx_prodring_fini(tp, &tnapi->prodring); + + if (tnapi->hw_status) { + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } + } + + if (tp->hw_stats) { + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); + tp->hw_stats = NULL; + } + } + + /* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. + */ + static int tg3_alloc_consistent(struct tg3 *tp) + { + int i; + + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, + GFP_KERNEL); + if (!tp->hw_stats) + goto err_out; + + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + struct tg3_hw_status *sblk; + + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); + if (!tnapi->hw_status) + goto err_out; + + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + sblk = tnapi->hw_status; + + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if ((!i && !tg3_flag(tp, ENABLE_TSS)) || + (i && tg3_flag(tp, ENABLE_TSS))) { + tnapi->tx_buffers = kzalloc( + sizeof(struct tg3_tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); + if (!tnapi->tx_ring) + goto err_out; + } + + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. + */ + switch (i) { + default: + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + break; + case 2: + tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; + break; + case 3: + tnapi->rx_rcb_prod_idx = &sblk->reserved; + break; + case 4: + tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; + break; + } + + /* + * If multivector RSS is enabled, vector 0 does not handle + * rx or tx interrupts. 
Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } + + return 0; + + err_out: + tg3_free_consistent(tp); + return -ENOMEM; + } + + #define MAX_WAIT_CNT 1000 + + /* To stop a block, clear the enable bit and poll till it + * clears. tp->lock is held. + */ + static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) + { + unsigned int i; + u32 val; + + if (tg3_flag(tp, 5705_PLUS)) { + switch (ofs) { + case RCVLSC_MODE: + case DMAC_MODE: + case MBFREE_MODE: + case BUFMGR_MODE: + case MEMARB_MODE: + /* We can't enable/disable these bits of the + * 5705/5750, just say success. + */ + return 0; + + default: + break; + } + } + + val = tr32(ofs); + val &= ~enable_bit; + tw32_f(ofs, val); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + val = tr32(ofs); + if ((val & enable_bit) == 0) + break; + } + + if (i == MAX_WAIT_CNT && !silent) { + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + return 0; + } + + /* tp->lock is held. */ + static int tg3_abort_hw(struct tg3 *tp, int silent) + { + int i, err; + + tg3_disable_ints(tp); + + tp->rx_mode &= ~RX_MODE_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); + + err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); + + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) + break; + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; + } + + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); + + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); + + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + if (tp->hw_stats) + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + return err; + } + + /* Save PCI 
command register before chip reset */ + static void tg3_save_pci_state(struct tg3 *tp) + { + pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); + } + + /* Restore PCI state after chip reset */ + static void tg3_restore_pci_state(struct tg3 *tp) + { + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. */ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tg3_flag(tp, ENABLE_APE)) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tg3_flag(tp, PCI_EXPRESS)) + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + } + + /* Make sure PCI-X relaxed ordering bit is clear. */ + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tg3_flag(tp, 5780_CLASS)) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tg3_flag(tp, USING_MSI)) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } + } + + /* tp->lock is held. */ + static int tg3_chip_reset(struct tg3 *tp) + { + u32 val; + void (*write_op)(struct tg3 *, u32, u32); + int i, err; + + tg3_nvram_lock(tp); + + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + + /* No matching tg3_nvram_unlock() after this because + * chip reset below will undo the nvram lock. + */ + tp->nvram_lock_cnt = 0; + + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 5755_PLUS)) + tw32(GRC_FASTBOOT_PC, 0); + + /* + * We must avoid the readl() that normally takes place. + * It locks machines, causes machine checks, and other + * fun things. So, temporarily disable the 5701 + * hardware workaround, while we do the reset. + */ + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; + + /* Prevent the irq handler from reading or writing PCI registers + * during chip reset when the memory enable bit in the PCI command + * register may be cleared. The chip does not generate interrupt + * at this time, but the irq handler may still be called due to irq + * sharing or irqpoll. 
+ */ + tg3_flag_set(tp, CHIP_RESETTING); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + } + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + /* do the reset */ + val = GRC_MISC_CFG_CORECLK_RESET; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Force PCIe 1.0a mode */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + tw32(GRC_MISC_CFG, (1 << 29)); + val |= (1 << 29); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + + /* Manage gphy power for all CPMU absent PCIe devices. */ + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) + val |= GRC_MISC_CFG_KEEP_GPHY_POWER; + + tw32(GRC_MISC_CFG, val); + + /* restore 5701 hardware bug workaround write method */ + tp->write32 = write_op; + + /* Unfortunately, we have to delay before the PCI read back. + * Some 575X chips even will not respond to a PCI cfg access + * when the reset command is given to the chip. + * + * How do these hardware designers expect things to work + * properly if the PCI write is posted for a long period + * of time? It is always necessary to have some method by + * which a register read back can occur to push the write + * out which does the reset. + * + * For most tg3 variants the trick below was working. + * Ho hum... + */ + udelay(120); + + /* Flush PCI posted writes. The normal MMIO registers + * are inaccessible at this time so this is the only + * way to make this reliably (actually, this is no longer + * the case, see above). I tried to use indirect + * register read/write but this upset some 5701 variants. + */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); + + udelay(120); + + if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { + u16 val16; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { + int i; + u32 cfg_val; + + /* Wait for link training to complete. */ + for (i = 0; i < 5000; i++) + udelay(100); + + pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); + pci_write_config_dword(tp->pdev, 0xc4, + cfg_val | (1 << 15)); + } + + /* Clear the "no snoop" and "relaxed ordering" bits. */ + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + &val16); + val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + /* + * Older PCIe devices only support the 128 byte + * MPS setting. Enforce the restriction. 
+ */ + if (!tg3_flag(tp, CPMU_PRESENT)) + val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + val16); + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + /* Clear error status */ + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); + } + + tg3_restore_pci_state(tp); + + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); + + val = 0; + if (tg3_flag(tp, 5780_CLASS)) + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { + tg3_stop_fw(tp); + tw32(0x5000, 0x400); + } + + tw32(GRC_MODE, tp->grc_mode); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { + val = tr32(0xc4); + + tw32(0xc4, val | (1 << 15)); + } + + if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) + tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; + tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; + } else + val = 0; + + tw32_f(MAC_MODE, val); + udelay(40); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + + err = tg3_poll_fw(tp); + if (err) + return err; + + tg3_mdio_start(tp); + + if (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + val = tr32(0x7c00); + + tw32(0x7c00, val | (1 << 25)); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + tp->last_event_jiffies = jiffies; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + } + + return 0; + } + + /* tp->lock is held. */ + static int tg3_halt(struct tg3 *tp, int kind, int silent) + { + int err; + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, kind); + + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); + + __tg3_set_mac_addr(tp, 0); + + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); + + if (err) + return err; + + return 0; + } + + static int tg3_set_mac_addr(struct net_device *dev, void *p) + { + struct tg3 *tp = netdev_priv(dev); + struct sockaddr *addr = p; + int err = 0, skip_mac_1 = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (!netif_running(dev)) + return 0; + + if (tg3_flag(tp, ENABLE_ASF)) { + u32 addr0_high, addr0_low, addr1_high, addr1_low; + + addr0_high = tr32(MAC_ADDR_0_HIGH); + addr0_low = tr32(MAC_ADDR_0_LOW); + addr1_high = tr32(MAC_ADDR_1_HIGH); + addr1_low = tr32(MAC_ADDR_1_LOW); + + /* Skip MAC addr 1 if ASF is using it. 
*/ + if ((addr0_high != addr1_high || addr0_low != addr1_low) && + !(addr1_high == 0 && addr1_low == 0)) + skip_mac_1 = 1; + } + spin_lock_bh(&tp->lock); + __tg3_set_mac_addr(tp, skip_mac_1); + spin_unlock_bh(&tp->lock); + + return err; + } + + /* tp->lock is held. */ + static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags, + u32 nic_addr) + { + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), + ((u64) mapping >> 32)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), + ((u64) mapping & 0xffffffff)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), + maxlen_flags); + + if (!tg3_flag(tp, 5705_PLUS)) + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_NIC_ADDR), + nic_addr); + } + + static void __tg3_set_rx_mode(struct net_device *); + static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) + { + int i; + + if (!tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, ENABLE_RSS)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } + + for (i = 0; i < tp->irq_cnt - 1; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + + if (tg3_flag(tp, ENABLE_TSS)) { + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + + if (tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } + } + } + + /* tp->lock is held. */ + static void tg3_rings_reset(struct tg3 *tp) + { + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* Disable all transmit rings but the first. 
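__tg3_set_coalesce() above programs the extra interrupt vectors by stepping a 0x18-byte stride from each *_VEC1 register for every vector past the first, then writes zeros through the same addresses for unused vectors. The addressing is plain base-plus-stride arithmetic; a sketch with an illustrative base value, not the real tg3 register map:

#include <stdio.h>

#define VEC1_BASE  0x3c80u      /* illustrative only */
#define VEC_STRIDE 0x18u

int main(void)
{
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("vector %u -> register 0x%08x\n",
                       i + 1, VEC1_BASE + i * VEC_STRIDE);
        return 0;
}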
*/ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + + /* Disable all receive return rings but the first. */ + if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; + + /* Zero mailbox registers. */ + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + if (tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + tp->napi[i].chk_msi_cnt = 0; + tp->napi[i].last_rx_cons = 0; + tp->napi[i].last_tx_cons = 0; + } + if (!tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[0].prodmbox, 0); + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!tg3_flag(tp, 5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + txrcb = NIC_SRAM_SEND_RCB; + rxrcb = NIC_SRAM_RCV_RET_RCB; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + if (tnapi->rx_rcb) { + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + rxrcb += TG3_BDINFO_SIZE; + } + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + + /* Clear status block in ram. 
*/ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + ((tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + rxrcb += TG3_BDINFO_SIZE; + } + } + + static void tg3_setup_rxbd_thresholds(struct tg3 *tp) + { + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); + } + + /* tp->lock is held. */ + static int tg3_reset_hw(struct tg3 *tp, int reset_phy) + { + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tg3_flag(tp, INIT_COMPLETE)) + tg3_abort_hw(tp, 1); + + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, val); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + TG3_CPMU_DBTMR1_LNKIDLE_2047US); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + + if (reset_phy) + tg3_phy_reset(tp); + + err = tg3_chip_reset(tp); + if (err) + return err; + + tg3_write_sig_legacy(tp, RESET_KIND_INIT); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + + val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); + val &= ~CPMU_LNK_AWARE_MACCLK_MASK; + val |= CPMU_LNK_AWARE_MACCLK_6_25; + tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); + + val = tr32(TG3_CPMU_HST_ACC); + val 
&= ~CPMU_HST_ACC_MACCLK_MASK; + val |= CPMU_HST_ACC_MACCLK_6_25; + tw32(TG3_CPMU_HST_ACC, val); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + if (tg3_flag(tp, L1PLLPD_EN)) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, + val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + } + + /* This works around an issue with Athlon chipsets on + * B3 tigon3 silicon. This bit has no effect on any + * other revision. But do not set this on PCI Express + * chips and don't even touch the clocks if the CPMU is present. + */ + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) { + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_RETRY_SAME_DMA; + tw32(TG3PCI_PCISTATE, val); + } + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + tw32(TG3PCI_PCISTATE, val); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { + /* Enable some hw fixes. */ + val = tr32(TG3PCI_MSI_DATA); + val |= (1 << 26) | (1 << 28) | (1 << 29); + tw32(TG3PCI_MSI_DATA, val); + } + + /* Descriptor ring init may make accesses to the + * NIC SRAM area to setup the TX descriptors, so we + * can only do this after the hardware has been + * successfully reset. 
+ */ + err = tg3_init_rings(tp); + if (err) + return err; + + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3PCI_DMA_RW_CTRL) & + ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) + val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= DMA_RWCTRL_TAGGED_STAT_WA; + tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + /* This value is determined during the probe time DMA + * engine test, tg3_test_dma. + */ + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + + tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | + GRC_MODE_4X_NIC_SEND_RINGS | + GRC_MODE_NO_TX_PHDR_CSUM | + GRC_MODE_NO_RX_PHDR_CSUM); + tp->grc_mode |= GRC_MODE_HOST_SENDBDS; + + /* Pseudo-header checksum is done by hardware logic and not + * the offload processers, so make the chip do the pseudo- + * header checksums on receive. For transmit it is more + * convenient to do the pseudo-header checksum in software + * as Linux does that on transmit for us in all cases. + */ + tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; + + tw32(GRC_MODE, + tp->grc_mode | + (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); + + /* Setup the timer prescalar register. Clock is always 66Mhz. */ + val = tr32(GRC_MISC_CFG); + val &= ~0xff; + val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + + /* Initialize MBUF/DESC pool. */ + if (tg3_flag(tp, 5750_PLUS)) { + /* Do nothing. */ + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { + tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); + else + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); + tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); + tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); + } else if (tg3_flag(tp, TSO_CAPABLE)) { + int fw_len; + + fw_len = tp->fw_len; + fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); + tw32(BUFMGR_MB_POOL_ADDR, + NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); + tw32(BUFMGR_MB_POOL_SIZE, + NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); + } + + if (tp->dev->mtu <= ETH_DATA_LEN) { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water); + } else { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water_jumbo); + } + tw32(BUFMGR_DMA_LOW_WATER, + tp->bufmgr_config.dma_low_water); + tw32(BUFMGR_DMA_HIGH_WATER, + tp->bufmgr_config.dma_high_water); + + val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; + tw32(BUFMGR_MODE, val); + for (i = 0; i < 2000; i++) { + if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) + break; + udelay(10); + } + if (i >= 2000) { + netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); + return -ENODEV; + } + + if 
(tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + tg3_setup_rxbd_thresholds(tp); + + /* Initialize TG3_BDINFO's at: + * RCVDBDI_STD_BD: standard eth size rx ring + * RCVDBDI_JUMBO_BD: jumbo frame rx ring + * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) + * + * like so: + * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring + * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | + * ring attribute flags + * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM + * + * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. + * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. + * + * The size of each ring is fixed in the firmware, but the location is + * configurable. + */ + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_std_mapping >> 32)); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!tg3_flag(tp, 5717_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + + /* Disable the mini ring */ + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_jmb_mapping >> 32)); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); + } else { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + } + + if (tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = TG3_RX_STD_MAX_SIZE_5700; + else + val = TG3_RX_STD_MAX_SIZE_5717; + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; + + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); + + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + + tg3_rings_reset(tp); + + /* Initialize MAC address and backoff seed. */ + __tg3_set_mac_addr(tp, 0); + + /* MTU + ethernet header + FCS + optional VLAN tag */ + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* The slot time is changed by tg3_setup_phy if we + * run at gigabit with half duplex. + */ + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); + + /* Receive rules. 
*/ + tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); + tw32(RCVLPC_CONFIG, 0x0181); + + /* Calculate RDMAC_MODE setting early, we need it to determine + * the RCVLPC_STATE_ENABLE mask. + */ + rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | + RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | + RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | + RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | + RDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + } + } + + if (tg3_flag(tp, PCI_EXPRESS)) + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if (tg3_flag(tp, 57765_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3_RDMA_RSRVCTRL_REG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(TG3_RDMA_RSRVCTRL_REG, + val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); + } + + /* Receive/send statistics. */ + if (tg3_flag(tp, 5750_PLUS)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + tg3_flag(tp, TSO_CAPABLE)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else { + tw32(RCVLPC_STATS_ENABLE, 0xffffff); + } + tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); + tw32(SNDDATAI_STATSENAB, 0xffffff); + tw32(SNDDATAI_STATSCTRL, + (SNDDATAI_SCTRL_ENABLE | + SNDDATAI_SCTRL_FASTUPD)); + + /* Setup host coalescing engine. 
*/ + tw32(HOSTCC_MODE, 0); + for (i = 0; i < 2000; i++) { + if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) + break; + udelay(10); + } + + __tg3_set_coalesce(tp, &tp->coal); + + if (!tg3_flag(tp, 5705_PLUS)) { + /* Status/statistics block address. See tg3_timer, + * the tg3_periodic_fetch_stats call there, and + * tg3_get_stats to see how this works for 5705/5750 chips. + */ + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tp->stats_mapping >> 32)); + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tp->stats_mapping & 0xffffffff)); + tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } + } + + tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); + + tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); + tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); + udelay(40); + + /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). + * If TG3_FLAG_IS_NIC is zero, we should read the + * register to preserve the GPIO settings for LOMs. The GPIOs, + * whether used as inputs or outputs, are set by boot code after + * reset. 
+ */ + if (!tg3_flag(tp, IS_NIC)) { + u32 gpio_mask; + + gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | + GRC_LCLCTRL_GPIO_OUTPUT3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + + tp->grc_local_ctrl &= ~gpio_mask; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; + + /* GPIO1 must be driven high for eeprom write protect */ + if (tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + } + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(100); + + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); + udelay(40); + } + + val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | + WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | + WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | + WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | + WDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || + tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { + /* nothing */ + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + val |= WDMAC_MODE_RX_ACCEL; + } + } + + /* Enable host coalescing bug fix */ + if (tg3_flag(tp, 5755_PLUS)) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; + + tw32_f(WDMAC_MODE, val); + udelay(40); + + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; + } + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + tw32_f(RDMAC_MODE, rdmac_mode); + udelay(40); + + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); + tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tg3_flag(tp, ENABLE_TSS)) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); + tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | 
SNDBDS_MODE_ATTN_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + err = tg3_load_5701_a0_firmware_fix(tp); + if (err) + return err; + } + + if (tg3_flag(tp, TSO_CAPABLE)) { + err = tg3_load_tso_firmware(tp); + if (err) + return err; + } + + tp->tx_mode = TX_MODE_ENABLE; + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + + tw32_f(MAC_TX_MODE, tp->tx_mode); + udelay(100); + + if (tg3_flag(tp, ENABLE_RSS)) { + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + if (tp->irq_cnt == 2) { + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { + tw32(reg, 0x0); + reg += 4; + } + } else { + u32 val; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + val = i % (tp->irq_cnt - 1); + i++; + for (; i % 8; i++) { + val <<= 4; + val |= (i % (tp->irq_cnt - 1)); + } + tw32(reg, val); + reg += 4; + } + } + + /* Setup the "secret" hash key. */ + tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); + tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); + tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); + tw32(MAC_RSS_HASH_KEY_3, 0x36621985); + tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); + tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); + tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); + tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); + tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); + tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + } + + tp->rx_mode = RX_MODE_ENABLE; + if (tg3_flag(tp, 5755_PLUS)) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + + if (tg3_flag(tp, ENABLE_RSS)) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + tw32(MAC_LED_CTRL, tp->led_ctrl); + + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + /* Set drive transmission level to 1.2V */ + /* only if the signal pre-emphasis bit is not set */ + val = tr32(MAC_SERDES_CFG); + val &= 0xfffff000; + val |= 0x880; + tw32(MAC_SERDES_CFG, val); + } + if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) + tw32(MAC_SERDES_CFG, 0x616000); + } + + /* Prevent chip from dropping frames when flow control + * is enabled. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = 1; + else + val = 2; + tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + /* Use hardware link auto-negotiation */ + tg3_flag_set(tp, HW_AUTONEG); + } + + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.speed = tp->link_config.orig_speed; + tp->link_config.duplex = tp->link_config.orig_duplex; + tp->link_config.autoneg = tp->link_config.orig_autoneg; + } + + err = tg3_setup_phy(tp, 0); + if (err) + return err; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. */ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); + } + } + } + + __tg3_set_rx_mode(tp->dev); + + /* Initialize receive rules. */ + tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); + + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) + limit = 8; + else + limit = 16; + if (tg3_flag(tp, ENABLE_ASF)) + limit -= 4; + switch (limit) { + case 16: + tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + case 15: + tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + case 14: + tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + case 13: + tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + case 12: + tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + case 11: + tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + case 10: + tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + case 9: + tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + case 8: + tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + case 7: + tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + case 6: + tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + case 5: + tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + case 4: + /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ + case 3: + /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ + case 2: + case 1: + + default: + break; + } + + if (tg3_flag(tp, ENABLE_APE)) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); + + return 0; + } + + /* Called at device open time to get the chip ready for + * packet processing. Invoked with tp->lock held. 
+ */ + static int tg3_init_hw(struct tg3 *tp, int reset_phy) + { + tg3_switch_clocks(tp); + + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + return tg3_reset_hw(tp, reset_phy); + } + + #define TG3_STAT_ADD32(PSTAT, REG) \ + do { u32 __val = tr32(REG); \ + (PSTAT)->low += __val; \ + if ((PSTAT)->low < __val) \ + (PSTAT)->high += 1; \ + } while (0) + + static void tg3_periodic_fetch_stats(struct tg3 *tp) + { + struct tg3_hw_stats *sp = tp->hw_stats; + + if (!netif_carrier_ok(tp->dev)) + return; + + TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); + TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); + TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); + TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); + TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); + TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); + TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); + TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); + TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); + + TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); + TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); + TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); + TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); + TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); + TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); + TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); + TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); + TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); + TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); + + TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); + } + + static void tg3_chk_missed_msi(struct tg3 *tp) + { + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tg3_msi(0, tnapi); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } + } + + static void tg3_timer(unsigned long __opaque) + { + struct tg3 *tp = (struct tg3 *) __opaque; + + if (tp->irq_sync) + goto restart_timer; + + spin_lock(&tp->lock); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_chk_missed_msi(tp); + + if (!tg3_flag(tp, TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); + } + + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tg3_flag_set(tp, RESTART_TIMER); + spin_unlock(&tp->lock); + schedule_work(&tp->reset_task); + return; + } + } + + /* This part only runs once per second. */ + if (!--tp->timer_counter) { + if (tg3_flag(tp, 5705_PLUS)) + tg3_periodic_fetch_stats(tp); + + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + u32 mac_stat; + int phy_event; + + mac_stat = tr32(MAC_STATUS); + + phy_event = 0; + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { + if (mac_stat & MAC_STATUS_MI_INTERRUPT) + phy_event = 1; + } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) + phy_event = 1; + + if (phy_event) + tg3_setup_phy(tp, 0); + } else if (tg3_flag(tp, POLL_SERDES)) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (!netif_carrier_ok(tp->dev) && + (mac_stat & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET))) { + need_setup = 1; + } + if (need_setup) { + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + tg3_setup_phy(tp, 0); + } + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tg3_serdes_parallel_detect(tp); + } + + tp->timer_counter = tp->timer_multiplier; + } + + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. 
+ * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. + */ + if (!--tp->asf_counter) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, + FWCMD_NICDRV_ALIVE3); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); + + tg3_generate_fw_event(tp); + } + tp->asf_counter = tp->asf_multiplier; + } + + spin_unlock(&tp->lock); + + restart_timer: + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + } + + static int tg3_request_irq(struct tg3 *tp, int irq_num) + { + irq_handler_t fn; + unsigned long flags; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; + + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + fn = tg3_msi; + if (tg3_flag(tp, 1SHOT_MSI)) + fn = tg3_msi_1shot; + flags = 0; + } else { + fn = tg3_interrupt; + if (tg3_flag(tp, TAGGED_STATUS)) + fn = tg3_interrupt_tagged; + flags = IRQF_SHARED; + } + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); + } + + static int tg3_test_interrupt(struct tg3 *tp) + { + struct tg3_napi *tnapi = &tp->napi[0]; + struct net_device *dev = tp->dev; + int err, i, intr_ok = 0; + u32 val; + + if (!netif_running(dev)) + return -ENODEV; + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. + */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); + if (err) + return err; + + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; + tg3_enable_ints(tp); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + tnapi->coal_now); + + for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + + int_mbox = tr32_mailbox(tnapi->int_mbox); + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; + break; + } + + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + msleep(10); + } + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + err = tg3_request_irq(tp, 0); + + if (err) + return err; + + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + return 0; + } + + return -EIO; + } + + /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is + * successfully restored + */ + static int tg3_test_msi(struct tg3 *tp) + { + int err; + u16 pci_cmd; + + if (!tg3_flag(tp, USING_MSI)) + return 0; + + /* Turn off SERR reporting in case MSI terminates with Master + * Abort. 
+ */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(tp->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = tg3_test_interrupt(tp); + + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + if (!err) + return 0; + + /* other failures */ + if (err != -EIO) + return err; + + /* MSI test failed, go back to INTx mode */ + netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " + "to INTx mode. Please report this failure to the PCI " + "maintainer and include system chipset information\n"); + + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + pci_disable_msi(tp->pdev); + + tg3_flag_clear(tp, USING_MSI); + tp->napi[0].irq_vec = tp->pdev->irq; + + err = tg3_request_irq(tp, 0); + if (err) + return err; + + /* Need to reset the chip because the MSI cycle may have terminated + * with Master Abort. + */ + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_init_hw(tp, 1); + + tg3_full_unlock(tp); + + if (err) + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + return err; + } + + static int tg3_request_firmware(struct tg3 *tp) + { + const __be32 *fw_data; + + if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + tp->fw_needed); + return -ENOENT; + } + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + * start address and _full_ length including BSS sections + * (which must be longer than the actual data, of course + */ + + tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ + if (tp->fw_len < (tp->fw->size - 12)) { + netdev_err(tp->dev, "bogus length %d in \"%s\"\n", + tp->fw_len, tp->fw_needed); + release_firmware(tp->fw); + tp->fw = NULL; + return -EINVAL; + } + + /* We no longer need firmware; we have it. */ + tp->fw_needed = NULL; + return 0; + } + + static bool tg3_enable_msix(struct tg3 *tp) + { + int i, rc, cpus = num_online_cpus(); + struct msix_entry msix_ent[tp->irq_max]; + + if (cpus == 1) + /* Just fallback to the simpler MSI mode. */ + return false; + + /* + * We want as many rx rings enabled as there are cpus. + * The first MSIX vector only deals with link interrupts, etc, + * so we add one to the number of vectors we are requesting. + */ + tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + if (rc < 0) { + return false; + } else if (rc != 0) { + if (pci_enable_msix(tp->pdev, msix_ent, rc)) + return false; + netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", + tp->irq_cnt, rc); + tp->irq_cnt = rc; + } + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + netif_set_real_num_tx_queues(tp->dev, 1); + rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1; + if (netif_set_real_num_rx_queues(tp->dev, rc)) { + pci_disable_msix(tp->pdev); + return false; + } + + if (tp->irq_cnt > 1) { + tg3_flag_set(tp, ENABLE_RSS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_flag_set(tp, ENABLE_TSS); + netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); + } + } + + return true; + } + + static void tg3_ints_init(struct tg3 *tp) + { + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case.
+ */ + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? Not using MSI\n"); + goto defcfg; + } + + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } + defcfg: + if (!tg3_flag(tp, USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); + } + } + + static void tg3_ints_fini(struct tg3 *tp) + { + if (tg3_flag(tp, USING_MSIX)) + pci_disable_msix(tp->pdev); + else if (tg3_flag(tp, USING_MSI)) + pci_disable_msi(tp->pdev); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); + } + + static int tg3_open(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + int i, err; + + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + netdev_warn(tp->dev, "TSO capability disabled\n"); + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { + netdev_notice(tp->dev, "TSO capability restored\n"); + tg3_flag_set(tp, TSO_CAPABLE); + } + } + + netif_carrier_off(tp->dev); + + err = tg3_power_up(tp); + if (err) + return err; + + tg3_full_lock(tp, 0); + + tg3_disable_ints(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + + /* The placement of this call is tied + * to the setup and use of Host TX descriptors. 
+ */ + err = tg3_alloc_consistent(tp); + if (err) + goto err_out1; + + tg3_napi_init(tp); + + tg3_napi_enable(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; + } + } + + if (err) + goto err_out2; + + tg3_full_lock(tp, 0); + + err = tg3_init_hw(tp, 1); + if (err) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + } else { + if (tg3_flag(tp, TAGGED_STATUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + tp->timer_counter = tp->timer_multiplier = + (HZ / tp->timer_offset); + tp->asf_counter = tp->asf_multiplier = + ((HZ / tp->timer_offset) * 2); + + init_timer(&tp->timer); + tp->timer.expires = jiffies + tp->timer_offset; + tp->timer.data = (unsigned long) tp; + tp->timer.function = tg3_timer; + } + + tg3_full_unlock(tp); + + if (err) + goto err_out3; + + if (tg3_flag(tp, USING_MSI)) { + err = tg3_test_msi(tp); + + if (err) { + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_full_unlock(tp); + + goto err_out2; + } + + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); + + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); + } + } + + tg3_phy_start(tp); + + tg3_full_lock(tp, 0); + + add_timer(&tp->timer); + tg3_flag_set(tp, INIT_COMPLETE); + tg3_enable_ints(tp); + + tg3_full_unlock(tp); + + netif_tx_start_all_queues(dev); + + /* + * Reset loopback feature if it was turned on while the device was down + * make sure that it's installed properly now. 
+ */ + if (dev->features & NETIF_F_LOOPBACK) + tg3_set_loopback(dev, dev->features); + + return 0; + + err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + err_out2: + tg3_napi_disable(tp); + tg3_napi_fini(tp); + tg3_free_consistent(tp); + + err_out1: + tg3_ints_fini(tp); + tg3_frob_aux_power(tp, false); + pci_set_power_state(tp->pdev, PCI_D3hot); + return err; + } + + static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); + static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); + + static int tg3_close(struct net_device *dev) + { + int i; + struct tg3 *tp = netdev_priv(dev); + + tg3_napi_disable(tp); + cancel_work_sync(&tp->reset_task); + + netif_tx_stop_all_queues(dev); + + del_timer_sync(&tp->timer); + + tg3_phy_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_disable_ints(tp); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + tg3_ints_fini(tp); + + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + + memcpy(&tp->estats_prev, tg3_get_estats(tp), + sizeof(tp->estats_prev)); + + tg3_napi_fini(tp); + + tg3_free_consistent(tp); + + tg3_power_down(tp); + + netif_carrier_off(tp->dev); + + return 0; + } + + static inline u64 get_stat64(tg3_stat64_t *val) + { + return ((u64)val->high << 32) | ((u64)val->low); + } + + static u64 calc_crc_errors(struct tg3 *tp) + { + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 val; + + spin_lock_bh(&tp->lock); + if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { + tg3_writephy(tp, MII_TG3_TEST1, + val | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); + } else + val = 0; + spin_unlock_bh(&tp->lock); + + tp->phy_crc_errors += val; + + return tp->phy_crc_errors; + } + + return get_stat64(&hw_stats->rx_fcs_errors); + } + + #define ESTAT_ADD(member) \ + estats->member = old_estats->member + \ + get_stat64(&hw_stats->member) + + static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) + { + struct tg3_ethtool_stats *estats = &tp->estats; + struct tg3_ethtool_stats *old_estats = &tp->estats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_estats; + + ESTAT_ADD(rx_octets); + ESTAT_ADD(rx_fragments); + ESTAT_ADD(rx_ucast_packets); + ESTAT_ADD(rx_mcast_packets); + ESTAT_ADD(rx_bcast_packets); + ESTAT_ADD(rx_fcs_errors); + ESTAT_ADD(rx_align_errors); + ESTAT_ADD(rx_xon_pause_rcvd); + ESTAT_ADD(rx_xoff_pause_rcvd); + ESTAT_ADD(rx_mac_ctrl_rcvd); + ESTAT_ADD(rx_xoff_entered); + ESTAT_ADD(rx_frame_too_long_errors); + ESTAT_ADD(rx_jabbers); + ESTAT_ADD(rx_undersize_packets); + ESTAT_ADD(rx_in_length_errors); + ESTAT_ADD(rx_out_length_errors); + ESTAT_ADD(rx_64_or_less_octet_packets); + ESTAT_ADD(rx_65_to_127_octet_packets); + ESTAT_ADD(rx_128_to_255_octet_packets); + ESTAT_ADD(rx_256_to_511_octet_packets); + ESTAT_ADD(rx_512_to_1023_octet_packets); + ESTAT_ADD(rx_1024_to_1522_octet_packets); + ESTAT_ADD(rx_1523_to_2047_octet_packets); + ESTAT_ADD(rx_2048_to_4095_octet_packets); + ESTAT_ADD(rx_4096_to_8191_octet_packets); + ESTAT_ADD(rx_8192_to_9022_octet_packets); + + ESTAT_ADD(tx_octets); + ESTAT_ADD(tx_collisions); + 
ESTAT_ADD(tx_xon_sent); + ESTAT_ADD(tx_xoff_sent); + ESTAT_ADD(tx_flow_control); + ESTAT_ADD(tx_mac_errors); + ESTAT_ADD(tx_single_collisions); + ESTAT_ADD(tx_mult_collisions); + ESTAT_ADD(tx_deferred); + ESTAT_ADD(tx_excessive_collisions); + ESTAT_ADD(tx_late_collisions); + ESTAT_ADD(tx_collide_2times); + ESTAT_ADD(tx_collide_3times); + ESTAT_ADD(tx_collide_4times); + ESTAT_ADD(tx_collide_5times); + ESTAT_ADD(tx_collide_6times); + ESTAT_ADD(tx_collide_7times); + ESTAT_ADD(tx_collide_8times); + ESTAT_ADD(tx_collide_9times); + ESTAT_ADD(tx_collide_10times); + ESTAT_ADD(tx_collide_11times); + ESTAT_ADD(tx_collide_12times); + ESTAT_ADD(tx_collide_13times); + ESTAT_ADD(tx_collide_14times); + ESTAT_ADD(tx_collide_15times); + ESTAT_ADD(tx_ucast_packets); + ESTAT_ADD(tx_mcast_packets); + ESTAT_ADD(tx_bcast_packets); + ESTAT_ADD(tx_carrier_sense_errors); + ESTAT_ADD(tx_discards); + ESTAT_ADD(tx_errors); + + ESTAT_ADD(dma_writeq_full); + ESTAT_ADD(dma_write_prioq_full); + ESTAT_ADD(rxbds_empty); + ESTAT_ADD(rx_discards); + ESTAT_ADD(rx_errors); + ESTAT_ADD(rx_threshold_hit); + + ESTAT_ADD(dma_readq_full); + ESTAT_ADD(dma_read_prioq_full); + ESTAT_ADD(tx_comp_queue_full); + + ESTAT_ADD(ring_set_send_prod_index); + ESTAT_ADD(ring_status_update); + ESTAT_ADD(nic_irqs); + ESTAT_ADD(nic_avoided_irqs); + ESTAT_ADD(nic_tx_threshold_hit); + + ESTAT_ADD(mbuf_lwm_thresh_hit); + + return estats; + } + + static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { + struct tg3 *tp = netdev_priv(dev); + struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_stats; + + stats->rx_packets = old_stats->rx_packets + + get_stat64(&hw_stats->rx_ucast_packets) + + get_stat64(&hw_stats->rx_mcast_packets) + + get_stat64(&hw_stats->rx_bcast_packets); + + stats->tx_packets = old_stats->tx_packets + + get_stat64(&hw_stats->tx_ucast_packets) + + get_stat64(&hw_stats->tx_mcast_packets) + + get_stat64(&hw_stats->tx_bcast_packets); + + stats->rx_bytes = old_stats->rx_bytes + + get_stat64(&hw_stats->rx_octets); + stats->tx_bytes = old_stats->tx_bytes + + get_stat64(&hw_stats->tx_octets); + + stats->rx_errors = old_stats->rx_errors + + get_stat64(&hw_stats->rx_errors); + stats->tx_errors = old_stats->tx_errors + + get_stat64(&hw_stats->tx_errors) + + get_stat64(&hw_stats->tx_mac_errors) + + get_stat64(&hw_stats->tx_carrier_sense_errors) + + get_stat64(&hw_stats->tx_discards); + + stats->multicast = old_stats->multicast + + get_stat64(&hw_stats->rx_mcast_packets); + stats->collisions = old_stats->collisions + + get_stat64(&hw_stats->tx_collisions); + + stats->rx_length_errors = old_stats->rx_length_errors + + get_stat64(&hw_stats->rx_frame_too_long_errors) + + get_stat64(&hw_stats->rx_undersize_packets); + + stats->rx_over_errors = old_stats->rx_over_errors + + get_stat64(&hw_stats->rxbds_empty); + stats->rx_frame_errors = old_stats->rx_frame_errors + + get_stat64(&hw_stats->rx_align_errors); + stats->tx_aborted_errors = old_stats->tx_aborted_errors + + get_stat64(&hw_stats->tx_discards); + stats->tx_carrier_errors = old_stats->tx_carrier_errors + + get_stat64(&hw_stats->tx_carrier_sense_errors); + + stats->rx_crc_errors = old_stats->rx_crc_errors + + calc_crc_errors(tp); + + stats->rx_missed_errors = old_stats->rx_missed_errors + + get_stat64(&hw_stats->rx_discards); + + stats->rx_dropped = tp->rx_dropped; + + return stats; + } + + static inline u32 calc_crc(unsigned char *buf, int len) + { + u32 
reg; + u32 tmp; + int j, k; + + reg = 0xffffffff; + + for (j = 0; j < len; j++) { + reg ^= buf[j]; + + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; + + reg >>= 1; + + if (tmp) + reg ^= 0xedb88320; + } + } + + return ~reg; + } + + static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) + { + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); + } + + static void __tg3_set_rx_mode(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + + #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; + #endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). */ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } + } + + static void tg3_set_rx_mode(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); + } + + static int tg3_get_regs_len(struct net_device *dev) + { + return TG3_REG_BLK_SIZE; + } + + static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) + { + struct tg3 *tp = netdev_priv(dev); + + regs->version = 0; + + memset(_p, 0, TG3_REG_BLK_SIZE); + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; + + tg3_full_lock(tp, 0); + + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); + } + + static int tg3_get_eeprom_len(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; + } + + static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) + { + struct tg3 *tp = netdev_priv(dev); + int ret; + u8 *pd; + u32 i, offset, len, b_offset, b_count; + __be32 val; + + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = TG3_EEPROM_MAGIC; + + if (offset & 3) { + /* adjustments to start on required 4 byte boundary */ + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) { + /* i.e. 
offset=1 len=2 */ + b_count = len; + } + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); + if (ret) + return ret; + memcpy(data, ((char *)&val) + b_offset, b_count); + len -= b_count; + offset += b_count; + eeprom->len += b_count; + } + + /* read bytes up to the last 4 byte boundary */ + pd = &data[eeprom->len]; + for (i = 0; i < (len - (len & 3)); i += 4) { + ret = tg3_nvram_read_be32(tp, offset + i, &val); + if (ret) { + eeprom->len += i; + return ret; + } + memcpy(pd + i, &val, 4); + } + eeprom->len += i; + + if (len & 3) { + /* read last bytes not ending on 4 byte boundary */ + pd = &data[eeprom->len]; + b_count = len & 3; + b_offset = offset + len - b_count; + ret = tg3_nvram_read_be32(tp, b_offset, &val); + if (ret) + return ret; + memcpy(pd, &val, b_count); + eeprom->len += b_count; + } + return 0; + } + + static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); + + static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) + { + struct tg3 *tp = netdev_priv(dev); + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; + __be32 start, end; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + + if ((b_offset = (offset & 3))) { + /* adjustments to start on required 4 byte boundary */ + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); + if (ret) + return ret; + len += b_offset; + offset &= ~3; + if (len < 4) + len = 4; + } + + odd_len = 0; + if (len & 3) { + /* adjustments to end on required 4 byte boundary */ + odd_len = 1; + len = (len + 3) & ~3; + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); + if (ret) + return ret; + } + + buf = data; + if (b_offset || odd_len) { + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (b_offset) + memcpy(buf, &start, 4); + if (odd_len) + memcpy(buf+len-4, &end, 4); + memcpy(buf + b_offset, data, eeprom->len); + } + + ret = tg3_nvram_write_block(tp, offset, len, buf); + + if (buf != data) + kfree(buf); + + return ret; + } + + static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP); + cmd->port = PORT_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + cmd->advertising = tp->link_config.advertising; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } + if (netif_running(dev)) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); + cmd->duplex = tp->link_config.active_duplex; + } else { + ethtool_cmd_speed_set(cmd, 
SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; + } + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = tp->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; + } + + static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (speed != SPEED_100 && + speed != SPEED_10) + return -EINVAL; + } + } + + tg3_full_lock(tp, 0); + + tp->link_config.autoneg = cmd->autoneg; + if (cmd->autoneg == AUTONEG_ENABLE) { + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + } else { + tp->link_config.advertising = 0; + tp->link_config.speed = speed; + tp->link_config.duplex = cmd->duplex; + } + + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + + if (netif_running(dev)) + tg3_setup_phy(tp, 1); + + tg3_full_unlock(tp); + + return 0; + } + + static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + struct tg3 *tp = netdev_priv(dev); + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); + strcpy(info->bus_info, pci_name(tp->pdev)); + } + + static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + wol->wolopts = 0; + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) + wol->wolopts = WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + } + + static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + if ((wol->wolopts & WAKE_MAGIC) && + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) + return -EINVAL; + + device_set_wakeup_enable(dp, wol->wolopts & 
WAKE_MAGIC); + + spin_lock_bh(&tp->lock); + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); + spin_unlock_bh(&tp->lock); + + return 0; + } + + static u32 tg3_get_msglevel(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + return tp->msg_enable; + } + + static void tg3_set_msglevel(struct net_device *dev, u32 value) + { + struct tg3 *tp = netdev_priv(dev); + tp->msg_enable = value; + } + + static int tg3_nway_reset(struct net_device *dev) + { + struct tg3 *tp = netdev_priv(dev); + int r; + + if (!netif_running(dev)) + return -EAGAIN; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + return -EINVAL; + + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); + } + + return r; + } + + static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) + { + struct tg3 *tp = netdev_priv(dev); + + ering->rx_max_pending = tp->rx_std_ring_mask; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; + + ering->rx_pending = tp->rx_pending; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + + ering->tx_pending = tp->napi[0].tx_pending; + } + + static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) + { + struct tg3 *tp = netdev_priv(dev); + int i, irq_sync = 0, err = 0; + + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || + (ering->tx_pending > TG3_TX_RING_SIZE - 1) || + (ering->tx_pending <= MAX_SKB_FRAGS) || + (tg3_flag(tp, TSO_BUG) && + (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) + return -EINVAL; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tp->rx_pending = ering->rx_pending; + + if (tg3_flag(tp, MAX_RXPEND_64) && + tp->rx_pending > 63) + tp->rx_pending = 63; + tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].tx_pending = ering->tx_pending; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err) + tg3_phy_start(tp); + + return err; + } + + static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) + { + struct tg3 *tp = netdev_priv(dev); + + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); + + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + epause->rx_pause = 1; + else + epause->rx_pause = 0; + + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + epause->tx_pause = 1; + else + epause->tx_pause = 0; + } + + static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) + { + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + if (tg3_flag(tp, USE_PHYLIB)) { + u32 newadv; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + 
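+ /* Descriptive note on the phylib path that follows: the requested
+  * rx/tx pause settings are mapped onto the usual 802.3 pause
+  * advertisement bits -- symmetric (rx+tx) pause advertises Pause
+  * only, rx-only advertises Pause | Asym_Pause, and tx-only
+  * advertises Asym_Pause.  The check directly below rejects
+  * combinations the attached PHY cannot advertise.
+  */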
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ tp->link_config.flowctrl = 0;
+ if (epause->rx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+ if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Pause;
+ } else
+ newadv = ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ u32 oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ /*
+ * Always renegotiate the link to
+ * inform our link partner of our
+ * flow control settings, even if the
+ * flow control is forced. Let
+ * tg3_adjust_link() do the final
+ * flow control setup.
+ */
+ return phy_start_aneg(phydev);
+ }
+ }
+
+ if (!epause->autoneg)
+ tg3_setup_flow_control(tp, 0, 0);
+ } else {
+ tp->link_config.orig_advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ tp->link_config.orig_advertising |= newadv;
+ }
+ } else {
+ int irq_sync = 0;
+
+ if (netif_running(dev)) {
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+ if (epause->rx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
+ if (epause->tx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+ }
+
+ return err;
+ }
+
+ static int tg3_get_sset_count(struct net_device *dev, int sset)
+ {
+ switch (sset) {
+ case ETH_SS_TEST:
+ return TG3_NUM_TEST;
+ case ETH_SS_STATS:
+ return TG3_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+ {
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+ break;
+ default:
+ WARN_ON(1); /* we need a WARN() */
+ break;
+ }
+ }
+
+ static int tg3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+ {
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(tp->dev))
+ return -EAGAIN;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON |
+ LED_CTRL_100MBPS_ON |
+ LED_CTRL_10MBPS_ON |
+ LED_CTRL_TRAFFIC_OVERRIDE |
+ LED_CTRL_TRAFFIC_BLINK |
+ LED_CTRL_TRAFFIC_LED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+ break;
+ }
+
+ return 0;
+ }
+
+ static void tg3_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+ {
+ struct tg3 *tp = netdev_priv(dev);
+ memcpy(tmp_stats,
tg3_get_estats(tp), sizeof(tp->estats)); + } + + static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) + { + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + + error: + kfree(buf); + return NULL; + } + + #define NVRAM_TEST_SIZE 0x100 + #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 + #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 + #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c + #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 + #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 + #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 + #define NVRAM_SELFBOOT_HW_SIZE 0x20 + #define NVRAM_SELFBOOT_DATA_SIZE 0x1c + + static int tg3_test_nvram(struct tg3 *tp) + { + u32 csum, magic, len; + __be32 *buf; + int i, j, k, err = 0, size; + + if (tg3_flag(tp, NO_NVRAM)) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { + if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == + TG3_EEPROM_SB_FORMAT_1) { + switch (magic & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; + break; + case TG3_EEPROM_SB_REVISION_2: + size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; + break; + case TG3_EEPROM_SB_REVISION_3: + size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; + break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; + default: + return -EIO; + } + } else + return 0; + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else + return -EIO; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) + break; + } + if (i < size) + goto out; + + /* Selfboot format */ + magic = be32_to_cpu(buf[0]); + if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + if ((magic & 
TG3_EEPROM_SB_REVISION_MASK) == + TG3_EEPROM_SB_REVISION_2) { + /* For rev 2, the csum doesn't include the MBA. */ + for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) + csum8 += buf8[i]; + for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) + csum8 += buf8[i]; + } else { + for (i = 0; i < size; i++) + csum8 += buf8[i]; + } + + if (csum8 == 0) { + err = 0; + goto out; + } + + err = -EIO; + goto out; + } + + if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + + err = -EIO; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if (csum != le32_to_cpu(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != le32_to_cpu(buf[0xfc/4])) + goto out; + + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + + err = 0; + + out: + kfree(buf); + return err; + } + + #define TG3_SERDES_TIMEOUT_SEC 2 + #define TG3_COPPER_TIMEOUT_SEC 6 + + static int tg3_test_link(struct tg3 *tp) + { + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (netif_carrier_ok(tp->dev)) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; + } + + /* Only test the commonly used registers */ + static int tg3_test_registers(struct tg3 *tp) + { + int i, is_5705, is_5750; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; + #define TG3_FL_5705 0x1 + #define TG3_FL_NOT_5705 0x2 + #define TG3_FL_NOT_5788 0x4 + #define TG3_FL_NOT_5750 0x8 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + 
{ MAC_TX_MODE, 0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. */ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. 
*/ + { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + is_5705 = is_5750 = 0; + if (tg3_flag(tp, 5705_PLUS)) { + is_5705 = 1; + if (tg3_flag(tp, 5750_PLUS)) + is_5750 = 1; + } + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if (tg3_flag(tp, IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. */ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. 
*/ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + + out: + if (netif_msg_hw(tp)) + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; + } + + static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) + { + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; + } + + static int tg3_test_memory(struct tg3 *tp) + { + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x00b50}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5906[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00400}, + { 0x00006000, 0x00400}, + { 0x00008000, 0x01000}, + { 0x00010000, 0x01000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5717[] = { + { 0x00000200, 0x00008}, + { 0x00010000, 0x0a000}, + { 0x00020000, 0x13c00}, + { 0xffffffff, 0x00000} + }, mem_tbl_57765[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x09800}, + { 0x00010000, 0x0a000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tg3_flag(tp, 5717_PLUS)) + mem_tbl = mem_tbl_5717; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + mem_tbl = mem_tbl_57765; + else if (tg3_flag(tp, 5755_PLUS)) + mem_tbl = mem_tbl_5755; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tg3_flag(tp, 5705_PLUS)) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) + break; + } + + return err; + } + + #define TG3_TSO_MSS 500 + + #define TG3_TSO_IP_HDR_LEN 20 + #define TG3_TSO_TCP_HDR_LEN 20 + #define TG3_TSO_TCP_OPT_LEN 12 + + static const u8 tg3_tso_header[] = { + 0x08, 0x00, + 0x45, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x06, 0x00, 0x00, + 0x0a, 0x00, 0x00, 0x01, + 0x0a, 0x00, 0x00, 0x02, + 0x0d, 0x00, 0xe0, 0x00, + 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x02, 0x00, + 0x80, 0x10, 0x10, 0x00, + 0x14, 0x09, 0x00, 0x00, + 0x01, 0x01, 0x08, 0x0a, + 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, + }; + + static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) + { + u32 rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tp->irq_cnt > 1) { + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = &tp->napi[1]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; + + err 
= -EIO; + + tx_len = pktsz; + skb = netdev_alloc_skb(tp->dev, tx_len); + if (!skb) + return -ENOMEM; + + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, 6); + memset(tx_data + 6, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + + if (tso_loopback) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else { + num_pkts = 1; + data_off = ETH_HLEN; + } + + for (i = data_off; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + rnapi->coal_now); + + udelay(10); + + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; + + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, + base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } + + tnapi->tx_prod++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); + + udelay(10); + + /* 350 usec to allow enough time on some 10/100 Mbps devices. 
*/ + for (i = 0; i < 35; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + coal_now); + + udelay(10); + + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); + dev_kfree_skb(skb); + + if (tx_idx != tnapi->tx_prod) + goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; + + if (!tso_loopback) { + if (rx_len != tx_len) + goto out; + + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } + + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_skb = tpr->rx_std_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else + goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_skb->data + i) != (u8) (val & 0xff)) + goto out; + } + } + + err = 0; + + /* tg3_free_rings will unmap and free the rx_skb */ + out: + return err; + } + + #define TG3_STD_LOOPBACK_FAILED 1 + #define TG3_JMB_LOOPBACK_FAILED 2 + #define TG3_TSO_LOOPBACK_FAILED 4 + #define TG3_LOOPBACK_FAILED \ + (TG3_STD_LOOPBACK_FAILED | \ + TG3_JMB_LOOPBACK_FAILED | \ + TG3_TSO_LOOPBACK_FAILED) + + static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) + { + int err = -EIO; + u32 eee_cap; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + + if (!netif_running(tp->dev)) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + err = tg3_reset_hw(tp, 1); + if (err) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } + + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) { + tg3_mac_loopback(tp, true); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[0] |= TG3_STD_LOOPBACK_FAILED; + + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[0] |= TG3_JMB_LOOPBACK_FAILED; + + tg3_mac_loopback(tp, false); + } + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tg3_flag(tp, USE_PHYLIB)) { + int i; + + tg3_phy_lpbk_set(tp, 0, false); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[1] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[1] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[1] |= TG3_JMB_LOOPBACK_FAILED; + + if (do_extlpbk) { + tg3_phy_lpbk_set(tp, 0, true); + + /* All link indications report up, but the hardware + * isn't really ready for about 20 msec. Double it + * to be sure. + */ + mdelay(40); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[2] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[2] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[2] |= TG3_JMB_LOOPBACK_FAILED; + } + + /* Re-enable gphy autopowerdown. */ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + } + + err = (data[0] | data[1] | data[2]) ? -EIO : 0; + + done: + tp->phy_flags |= eee_cap; + + return err; + } + + static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) + { + struct tg3 *tp = netdev_priv(dev); + bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + } + if (!doextlpbk && tg3_test_link(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + data[1] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int err, err2 = 0, irq_sync = 0; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!tg3_flag(tp, 5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tg3_phy_reset(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[2] = 1; + } + + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[3] = 1; + } + + if (doextlpbk) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + + if (tg3_test_loopback(tp, &data[4], doextlpbk)) + etest->flags |= ETH_TEST_FL_FAILED; + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[7] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (!err2) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err2) + 
tg3_phy_start(tp); + } + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tg3_power_down(tp); + + } + + static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct mii_ioctl_data *data = if_mii(ifr); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_mii_ioctl(phydev, ifr, cmd); + } + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = tp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&tp->lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&tp->lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; + } + + static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + { + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; + } + + static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + { + struct tg3 *tp = netdev_priv(dev); + u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; + u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; + + if (!tg3_flag(tp, 5705_PLUS)) { + max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; + max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; + max_stat_coal_ticks = MAX_STAT_COAL_TICKS; + min_stat_coal_ticks = MIN_STAT_COAL_TICKS; + } + + if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || + (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || + (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || + (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || + (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || + (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || + (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || + (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) + return -EINVAL; + + /* No rx interrupts will be generated if both are zero */ + if ((ec->rx_coalesce_usecs == 0) && + (ec->rx_max_coalesced_frames == 0)) + return -EINVAL; + + /* No tx interrupts will be generated if both are zero */ + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + /* Only copy relevant parameters, ignore all others. 
*/ + tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; + tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; + tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; + + if (netif_running(dev)) { + tg3_full_lock(tp, 0); + __tg3_set_coalesce(tp, &tp->coal); + tg3_full_unlock(tp); + } + return 0; + } + + static const struct ethtool_ops tg3_ethtool_ops = { + .get_settings = tg3_get_settings, + .set_settings = tg3_set_settings, + .get_drvinfo = tg3_get_drvinfo, + .get_regs_len = tg3_get_regs_len, + .get_regs = tg3_get_regs, + .get_wol = tg3_get_wol, + .set_wol = tg3_set_wol, + .get_msglevel = tg3_get_msglevel, + .set_msglevel = tg3_set_msglevel, + .nway_reset = tg3_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tg3_get_eeprom_len, + .get_eeprom = tg3_get_eeprom, + .set_eeprom = tg3_set_eeprom, + .get_ringparam = tg3_get_ringparam, + .set_ringparam = tg3_set_ringparam, + .get_pauseparam = tg3_get_pauseparam, + .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, + .set_phys_id = tg3_set_phys_id, + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, + }; + + static void __devinit tg3_get_eeprom_size(struct tg3 *tp) + { + u32 cursize, val, magic; + + tp->nvram_size = EEPROM_CHIP_SIZE; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; + + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + return; + + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. + */ + cursize = 0x10; + + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) + break; + + cursize <<= 1; + } + + tp->nvram_size = cursize; + } + + static void __devinit tg3_get_nvram_size(struct tg3 *tp) + { + u32 val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. 
+ */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + } + + static void __devinit tg3_get_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; + } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } + } + + static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) + { + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } + } + + static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + } + + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + } + + static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + 
switch (nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } + } + + static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } + } + + static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + 
tp->nvram_pagesize = 256; + break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + } + } + } + + static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) + { + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + } + + static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + + static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) + { + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + 
tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) + { + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = 
TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + } + + /* Chips other than 5700/5701 use the NVRAM for fetching info. */ + static void __devinit tg3_nvram_init(struct tg3 *tp) + { + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); + + msleep(1); + + /* Enable seeprom accesses. 
*/ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); + + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; + } + tg3_enable_nvram_access(tp); + + tp->nvram_size = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + + tg3_get_eeprom_size(tp); + } + } + + static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) + { + int i, j, rc = 0; + u32 val; + + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; + + addr = offset + i; + + memcpy(&data, buf + i, 4); + + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). 
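The reversal described in the comment above can be checked in isolation: byte-swapping the big-endian interpretation of four bytes yields the little-endian interpretation of the same bytes, which is what swab32(be32_to_cpu(data)) produces for the write just below. The helpers here are local stand-ins for the kernel's swab32/be32_to_cpu so the check runs in userspace.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins defined on raw bytes, so the result does not depend on the
     * endianness of the machine running the check. */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static uint32_t load_le32(const uint8_t *p)
    {
        return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
               ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
    }

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
    }

    int main(void)
    {
        const uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

        /* swab32(be32_to_cpu(x)) == "read the same bytes as little-endian" */
        assert(bswap32(load_be32(buf)) == load_le32(buf));
        printf("0x%08x -> 0x%08x\n", load_be32(buf), bswap32(load_be32(buf)));
        return 0;
    }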
+ */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; + } + } + + return rc; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) + { + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; + + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + while (len) { + int j; + u32 phy_addr, page_off, size; + + phy_addr = offset & ~pagemask; + + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; + + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; + + len -= size; + + memcpy(tmp + page_off, buf, size); + + offset = offset + (pagesize - page_off); + + tg3_enable_nvram_access(tp); + + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Issue another write enable to start the write. 
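The unbuffered path earlier in this function programs parts that can only be written a whole page at a time: read the page containing the target offset, merge the new bytes into that copy, erase, then rewrite the page with the write-enable/erase/program sequence shown here. The per-page chunking can be summarized as below; this is a generic sketch of the pattern, not the driver's exact arithmetic.

    #include <stdint.h>
    #include <stdio.h>

    /* Split an (offset, len) write into page-sized read-modify-write steps.
     * 'pagesize' is assumed to be a power of two here. */
    static void split_into_pages(uint32_t offset, uint32_t len, uint32_t pagesize)
    {
        uint32_t pagemask = pagesize - 1;

        while (len) {
            uint32_t page_base = offset & ~pagemask;  /* page to read and erase */
            uint32_t page_off  = offset & pagemask;   /* where new bytes land */
            uint32_t chunk     = pagesize - page_off; /* room left in this page */

            if (chunk > len)
                chunk = len;

            printf("page 0x%06x: merge %u bytes at offset %u, erase, rewrite\n",
                   page_base, chunk, page_off);

            offset += chunk;
            len    -= chunk;
        }
    }

    int main(void)
    {
        split_into_pages(0x108, 600, 256);   /* sample values only */
        return 0;
    }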
*/ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + for (j = 0; j < pagesize; j += 4) { + __be32 data; + + data = *((__be32 *) (tmp + j)); + + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + tw32(NVRAM_ADDR, phy_addr + j); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; + + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + if (ret) + break; + } + + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); + + kfree(tmp); + + return ret; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) + { + int i, ret = 0; + + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; + + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + page_off = offset % tp->nvram_pagesize; + + phy_addr = tg3_nvram_phys_addr(tp, offset); + + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + + if ((ret = tg3_nvram_exec_cmd(tp, + NVRAM_CMD_WREN | NVRAM_CMD_GO | + NVRAM_CMD_DONE))) + + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + return ret; + } + + /* offset and length are dword aligned */ + static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) + { + int ret; + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } + + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); + } + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + } + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } + + return ret; + } + + struct subsys_tbl_ent { + u16 subsys_vendor, subsys_devid; + u32 phy_id; + }; + + static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { + /* Broadcom boards. 
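In the buffered write path above, each 32-bit word is tagged NVRAM_CMD_FIRST when it opens a page (or the whole transfer) and NVRAM_CMD_LAST when it closes a page or is the final word of the transfer. The flag selection, lifted out with placeholder flag values so it can be compiled and checked on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values standing in for NVRAM_CMD_FIRST / NVRAM_CMD_LAST. */
    #define CMD_FIRST 0x1
    #define CMD_LAST  0x2

    /* Flags for the word at byte index 'i' of a 'len'-byte transfer starting
     * at 'offset', with 'pagesize'-byte pages (mirrors the logic above). */
    static uint32_t word_flags(uint32_t offset, uint32_t i, uint32_t len,
                               uint32_t pagesize)
    {
        uint32_t page_off = (offset + i) % pagesize;
        uint32_t flags = 0;

        if (page_off == 0 || i == 0)
            flags |= CMD_FIRST;
        if (page_off == pagesize - 4 || i == len - 4)
            flags |= CMD_LAST;

        return flags;
    }

    int main(void)
    {
        /* 16-byte write starting 4 bytes before a 256-byte page boundary:
         * expect FIRST|LAST, FIRST, none, LAST. */
        for (uint32_t i = 0; i < 16; i += 4)
            printf("word %2u: flags 0x%x\n", i, word_flags(252, i, 16, 256));
        return 0;
    }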
*/ + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, + + /* 3com boards. */ + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, + + /* DELL boards. */ + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, + + /* Compaq boards. */ + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, + + /* IBM boards. */ + { TG3PCI_SUBVENDOR_ID_IBM, + TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } + }; + + static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) + { + int i; + + for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { + if ((subsys_id_to_phy_id[i].subsys_vendor == + tp->pdev->subsystem_vendor) && + (subsys_id_to_phy_id[i].subsys_devid == + tp->pdev->subsystem_device)) + return &subsys_id_to_phy_id[i]; + } + return NULL; + } + + static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) + { + u32 val; + + tp->phy_id = TG3_PHY_ID_INVALID; + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + /* Assume an onboard device and WOL capable by default. 
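tg3_lookup_by_subsys() just above is a plain linear scan of a static table keyed on the PCI subsystem vendor/device pair. The same shape in isolation, with made-up entries rather than the real board IDs:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same layout as subsys_tbl_ent; the values here are placeholders. */
    struct subsys_ent {
        uint16_t vendor, devid;
        uint32_t phy_id;
    };

    static const struct subsys_ent tbl[] = {
        { 0x14e4, 0x0001, 0x1111 },
        { 0x14e4, 0x0002, 0x2222 },
    };

    static const struct subsys_ent *lookup(uint16_t vendor, uint16_t devid)
    {
        for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (tbl[i].vendor == vendor && tbl[i].devid == devid)
                return &tbl[i];
        return NULL;   /* caller falls back to other probing, as above */
    }

    int main(void)
    {
        const struct subsys_ent *p = lookup(0x14e4, 0x0002);
        printf("%s\n", p ? "found" : "not found");
        return 0;
    }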
*/ + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) + tg3_flag_set(tp, ASPM_WORKAROUND); + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + goto done; + } + + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg, led_cfg; + u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; + int eeprom_phy_serdes = 0; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + tp->nic_sram_data_cfg = nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); + ver >>= NIC_SRAM_DATA_VER_SHIFT; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && + (ver > 0) && (ver < 0x100)) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == + NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) + eeprom_phy_serdes = 1; + + tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); + if (nic_phy_id != 0) { + u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; + u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; + + eeprom_phy_id = (id1 >> 16) << 10; + eeprom_phy_id |= (id2 & 0xfc00) << 16; + eeprom_phy_id |= (id2 & 0x03ff) << 0; + } else + eeprom_phy_id = 0; + + tp->phy_id = eeprom_phy_id; + if (eeprom_phy_serdes) { + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; + } + + if (tg3_flag(tp, 5750_PLUS)) + led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | + SHASTA_EXT_LED_MODE_MASK); + else + led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; + + switch (led_cfg) { + default: + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_MAC: + tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. 
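Earlier in this function the 32-bit phy_id is assembled from two 16-bit halves of the SRAM word, and tg3_phy_probe() later in the hunk builds the same representation from the MII PHYSID1/PHYSID2 registers. The packing, mirrored on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the packing used in this hunk: combine two 16-bit PHY id
     * words into the driver's internal 32-bit phy_id value. */
    static uint32_t pack_phy_id(uint32_t id1, uint32_t id2)
    {
        uint32_t phy_id;

        phy_id  = (id1 & 0xffff) << 10;   /* whole first id word */
        phy_id |= (id2 & 0xfc00) << 16;   /* top 6 bits of second word */
        phy_id |= (id2 & 0x03ff) << 0;    /* low 10 bits of second word */

        return phy_id;
    }

    int main(void)
    {
        /* Sample register values only; prints 0x600080b0. */
        printf("0x%08x\n", pack_phy_id(0x0020, 0x60b0));
        return 0;
    }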
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5701) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + break; + + case SHASTA_EXT_LED_SHARED: + tp->led_ctrl = LED_CTRL_MODE_SHARED; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + case SHASTA_EXT_LED_MAC: + tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; + break; + + case SHASTA_EXT_LED_COMBO: + tp->led_ctrl = LED_CTRL_MODE_COMBO; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + } + + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && + tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { + tg3_flag_set(tp, EEPROM_WRITE_PROT); + if ((tp->pdev->subsystem_vendor == + PCI_VENDOR_ID_ARIMA) && + (tp->pdev->subsystem_device == 0x205a || + tp->pdev->subsystem_device == 0x2063)) + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + } else { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + + if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && + !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) + tg3_flag_clear(tp, WOL_CAP); + + if (tg3_flag(tp, WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + + if (cfg2 & (1 << 17)) + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; + + /* serdes signal pre-emphasis in register 0x590 set by */ + /* bootcode if bit 18 is set */ + if (cfg2 & (1 << 18)) + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; + + if ((tg3_flag(tp, 57765_PLUS) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; + + if (tg3_flag(tp, PCI_EXPRESS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + u32 cfg3; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); + if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) + tg3_flag_set(tp, ASPM_WORKAROUND); + } + + if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) + tg3_flag_set(tp, RGMII_INBAND_DISABLE); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); + } + done: + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); + } + + static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) + { + int i; + u32 val; + + tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); + tw32(OTP_CTRL, cmd); + + /* Wait for up to 1 ms for command to execute. */ + for (i = 0; i < 100; i++) { + val = tr32(OTP_STATUS); + if (val & OTP_STATUS_CMD_DONE) + break; + udelay(10); + } + + return (val & OTP_STATUS_CMD_DONE) ? 
0 : -EBUSY; + } + + /* Read the gphy configuration from the OTP region of the chip. The gphy + * configuration is a 32-bit value that straddles the alignment boundary. + * We do two 32-bit reads and then shift and merge the results. + */ + static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) + { + u32 bhalf_otp, thalf_otp; + + tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) + return 0; + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + thalf_otp = tr32(OTP_READ_DATA); + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + bhalf_otp = tr32(OTP_READ_DATA); + + return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); + } + + static void __devinit tg3_phy_init_link_config(struct tg3 *tp) + { + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; + } + + static int __devinit tg3_phy_probe(struct tg3 *tp) + { + u32 hw_phy_id_1, hw_phy_id_2; + u32 hw_phy_id, hw_phy_id_masked; + int err; + + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, USE_PHYLIB)) + return tg3_phy_init(tp); + + /* Reading the PHY ID register can conflict with ASF + * firmware access to the PHY hardware. + */ + err = 0; + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { + hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; + } else { + /* Now read the physical PHY_ID from the chip and verify + * that it is sane. If it doesn't look good, we fall back + * to either the hard-coded table based PHY_ID and failing + * that the value found in the eeprom area. + */ + err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); + err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); + + hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; + hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; + hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; + + hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; + } + + if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { + tp->phy_id = hw_phy_id; + if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; + } else { + if (tp->phy_id != TG3_PHY_ID_INVALID) { + /* Do nothing, phy ID already set up in + * tg3_get_eeprom_hw_cfg(). + */ + } else { + struct subsys_tbl_ent *p; + + /* No eeprom signature? Try the hardcoded + * subsys device table. 
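Earlier in this hunk, tg3_read_otp_phycfg() notes that the gphy configuration straddles an alignment boundary, so it performs two 32-bit reads and keeps the low half of the first word and the high half of the second. The merge is the shift-and-or shown below.

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as in tg3_read_otp_phycfg(): the low half of 'thalf'
     * becomes the high half of the result, the high half of 'bhalf' the low. */
    static uint32_t merge_halves(uint32_t thalf, uint32_t bhalf)
    {
        return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
    }

    int main(void)
    {
        /* Sample values only: 0xaaaa1234 and 0x5678bbbb merge to 0x12345678. */
        printf("0x%08x\n", merge_halves(0xaaaa1234, 0x5678bbbb));
        return 0;
    }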
+ */ + p = tg3_lookup_by_subsys(tp); + if (!p) + return -ENODEV; + + tp->phy_id = p->phy_id; + if (!tp->phy_id || + tp->phy_id == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + } + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + + tg3_phy_init_link_config(tp); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, mask; + + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + goto skip_phy_reset; + + err = tg3_phy_reset(tp); + if (err) + return err; + + tg3_phy_set_wirespeed(tp); + + mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); + if (!tg3_copper_is_advertising_all(tp, mask)) { + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + + skip_phy_reset: + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + err = tg3_init_5401phy_dsp(tp); + } + + return err; + } + + static void __devinit tg3_read_vpd(struct tg3 *tp) + { + u8 *vpd_data; + unsigned int block_end, rosize, len; + u32 vpdlen; + int j, i = 0; + + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; + + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + rosize = pci_vpd_lrdt_size(&vpd_data[i]); + block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > vpdlen) + goto out_not_found; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + } + + partno: + i = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_PARTNO); + if (i < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[i]); + + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len > TG3_BPN_SIZE || + (len + i) > vpdlen) + goto out_not_found; + + memcpy(tp->board_part_number, &vpd_data[i], len); + + out_not_found: + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + + out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + 
strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + strcpy(tp->board_part_number, "BCM95906"); + } else { + nomatch: + strcpy(tp->board_part_number, "none"); + } + } + + static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) + { + u32 val; + + if (tg3_nvram_read(tp, offset, &val) || + (val & 0xfc000000) != 0x0c000000 || + tg3_nvram_read(tp, offset + 4, &val) || + val != 0) + return 0; + + return 1; + } + + static void __devinit tg3_read_bc_ver(struct tg3 *tp) + { + u32 val, offset, start, ver_offset; + int i, dst_off; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + dst_off = strlen(tp->fw_ver); + + if (newver) { + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } + } + + static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) + { + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); + } + + static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) + { + u32 offset, major, minor, build; + + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); + + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) + return; + + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case 
TG3_EEPROM_SB_REVISION_3: + offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_4: + offset = TG3_EEPROM_SB_F1R4_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_5: + offset = TG3_EEPROM_SB_F1R5_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; + default: + return; + } + + if (tg3_nvram_read(tp, offset, &val)) + return; + + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; + + if (minor > 99 || build > 26) + return; + + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); + + if (build > 0) { + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; + } + } + + static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) + { + u32 val, offset, start; + int i, vlen; + + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == TG3_NVM_DIR_END) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + start = 0x08000000; + else if (tg3_nvram_read(tp, offset - 4, &start)) + return; + + if (tg3_nvram_read(tp, offset + 4, &offset) || + !tg3_fw_img_is_valid(tp, offset) || + tg3_nvram_read(tp, offset + 8, &val)) + return; + + offset += val - start; + + vlen = strlen(tp->fw_ver); + + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) + return; + + offset += sizeof(v); + + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); + break; + } + + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } + } + + static void __devinit tg3_read_dash_ver(struct tg3 *tp) + { + int vlen; + u32 apedata; + char *fwtype; + + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { + tg3_flag_set(tp, APE_HAS_NCSI); + fwtype = "NCSI"; + } else { + fwtype = "DASH"; + } + + vlen = strlen(tp->fw_ver); + + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); + } + + static void __devinit tg3_read_fw_ver(struct tg3 *tp) + { + u32 val; + bool vpd_vers = false; + + if (tp->fw_ver[0] != 0) + vpd_vers = true; + + if (tg3_flag(tp, NO_NVRAM)) { + strcat(tp->fw_ver, "sb"); + return; + } + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + else + return; + + if (vpd_vers) + goto done; + + if (tg3_flag(tp, ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF)) + tg3_read_dash_ver(tp); + } else if 
(tg3_flag(tp, ENABLE_ASF)) { + tg3_read_mgmtfw_ver(tp); + } + + done: + tp->fw_ver[TG3_VER_SIZE - 1] = 0; + } + + static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); + + static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) + { + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; + else + return TG3_RX_RET_MAX_SIZE_5705; + } + + static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, + }; + + static int __devinit tg3_get_invariants(struct tg3 *tp) + { + u32 misc_ctrl_reg; + u32 pci_state_reg, grc_misc_cfg; + u32 val; + u16 pci_cmd; + int err; + + /* Force memory write invalidate off. If we leave it on, + * then on 5700_BX chips we have to enable a workaround. + * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary + * to match the cacheline size. The Broadcom driver have this + * workaround but turns MWI off all the times so never uses + * it. This seems to suggest that the workaround is insufficient. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. + */ + pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + tp->pci_chip_rev_id = (misc_ctrl_reg >> + MISC_HOST_CTRL_CHIPREV_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + u32 prod_id_asic_rev; + + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN2_PRODID_ASICREV, + &prod_id_asic_rev); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN15_PRODID_ASICREV, + &prod_id_asic_rev); + else + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + + tp->pci_chip_rev_id = prod_id_asic_rev; + } + + /* Wrong chip ID in 5752 A0. This code can be removed later + * as A0 is not in production. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) + tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, + * we need to disable memory and use config. cycles + * only to access all registers. The 5702/03 chips + * can mistakenly decode the special cycles from the + * ICH chipsets as memory write cycles, causing corruption + * of register and memory space. Only certain ICH bridges + * will drive special cycles with non-zero data during the + * address phase which can fall within the 5703's address + * range. 
This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || + (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + if (bridge->revision > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + tg3_flag_set(tp, ICH_WORKAROUND); + pci_dev_put(bridge); + break; + } + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + } bridge_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, + { }, + }; + struct tg3_dev_id *pci_id = &bridge_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, + pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 5701_DMA_BUG); + pci_dev_put(bridge); + break; + } + } + } + + /* The EPB bridge inside 5714, 5715, and 5780 cannot support + * DMA addresses > 40-bit. This bridge may have other additional + * 57xx devices behind it in some 4-port NIC designs for example. + * Any tg3 device found behind the bridge will also need the 40-bit + * DMA workaround. 
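Both bridge scans here, and the ServerWorks EPB scan that follows, decide whether the tg3 device sits behind a candidate bridge by testing that its bus number lies inside the bridge's secondary..subordinate bus window. Reduced to the bare comparison, with plain integers instead of struct pci_bus (a sketch only):

    #include <stdbool.h>
    #include <stdio.h>

    /* A bridge forwards to buses secondary..subordinate inclusive, so a
     * device on bus 'busnr' is behind it when busnr falls in that window. */
    static bool behind_bridge(int secondary, int subordinate, int busnr)
    {
        return secondary <= busnr && busnr <= subordinate;
    }

    int main(void)
    {
        printf("%d\n", behind_bridge(2, 5, 3));   /* 1: bus 3 is behind */
        printf("%d\n", behind_bridge(2, 5, 7));   /* 0: bus 7 is not */
        return 0;
    }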
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); + tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); + } else { + struct pci_dev *bridge = NULL; + + do { + bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_EPB, + bridge); + if (bridge && bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + pci_dev_put(bridge); + break; + } + } while (bridge); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + tp->pdev_peer = tg3_find_peer(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); + + /* Determine TSO capabilities */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + tg3_flag_clear(tp, TSO_BUG); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tg3_flag_set(tp, TSO_CAPABLE); + else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + + tp->irq_max = 1; + + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && + tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && + tp->pdev_peer == tp->pdev)) + tg3_flag_clear(tp, SUPPORT_MSI); + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_flag_set(tp, 1SHOT_MSI); + } + + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); + tp->irq_max = TG3_IRQ_MAX_VECS; + } + } + + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_flag_set(tp, 4K_FIFO_LIMIT); + + if (tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); + + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + + if (pci_is_pcie(tp->pdev)) { + u16 lnkctl; + + tg3_flag_set(tp, PCI_EXPRESS); + + tp->pcie_readrq = 4096; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) + tg3_flag_set(tp, CLKREQ_BUG); + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { + tg3_flag_set(tp, L1PLLPD_EN); + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + /* BCM5785 devices are effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. 
+ */ + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + dev_err(&tp->pdev->dev, + "Cannot find PCI-X capability, aborting\n"); + return -EIO; + } + + if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) + tg3_flag_set(tp, PCIX_MODE); + } + + /* If we have an AMD 762 or VIA K8T800 chipset, write + * reordering to the mailbox registers done by the host + * controller can cause major troubles. We read back from + * every mailbox register write to force the writes to be + * posted to the chip in order. + */ + if (pci_dev_present(tg3_write_reorder_chipsets) && + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + &tp->pci_cacheline_sz); + pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, + &tp->pci_lat_timer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + tp->pci_lat_timer < 64) { + tp->pci_lat_timer = 64; + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + + /* Important! -- It is critical that the PCI-X hw workaround + * situation is decided before the first MMIO register access. + */ + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { + /* 5700 BX chips need to have their TX producer index + * mailboxes written twice to workaround a bug. + */ + tg3_flag_set(tp, TXD_MBOX_HWBUG); + + /* If we are in PCI-X mode, enable register write workaround. + * + * The workaround is to use indirect register accesses + * for all chip writes not to mailbox registers. + */ + if (tg3_flag(tp, PCIX_MODE)) { + u32 pm_reg; + + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + + /* The chip can have it's power management PCI config + * space registers clobbered due to this bug. + * So explicitly force the chip into D0 here. + */ + pci_read_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + &pm_reg); + pm_reg &= ~PCI_PM_CTRL_STATE_MASK; + pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; + pci_write_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + pm_reg); + + /* Also, force SERR#/PERR# in PCI command. */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + } + + if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) + tg3_flag_set(tp, PCI_HIGH_SPEED); + if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) + tg3_flag_set(tp, PCI_32BIT); + + /* Chip-specific fixup from Broadcom driver */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && + (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { + pci_state_reg |= PCISTATE_RETRY_SAME_DMA; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); + } + + /* Default fast path register access methods */ + tp->read32 = tg3_read32; + tp->write32 = tg3_write32; + tp->read32_mbox = tg3_read32; + tp->write32_mbox = tg3_write32; + tp->write32_tx_mbox = tg3_write32; + tp->write32_rx_mbox = tg3_write32; + + /* Various workaround register access methods */ + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) + tp->write32 = tg3_write_indirect_reg32; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || + (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { + /* + * Back to back register writes can cause problems on these + * chips, the workaround is to read back all reg writes + * except those to mailbox regs. + * + * See tg3_write_indirect_reg32(). 
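The read-back workaround described above amounts to flushing every non-mailbox register write by immediately reading the same register. In kernel context that accessor looks roughly like the sketch below; writel/readl are the standard MMIO helpers, and the function name is illustrative rather than the driver's own tg3_write_flush_reg32().

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch: post an MMIO write immediately by reading the register back.
     * Assumes 'regs' is an ioremap()ed BAR, as tp->regs is in this driver. */
    static void write_then_flush(void __iomem *regs, u32 off, u32 val)
    {
        writel(val, regs + off);
        readl(regs + off);   /* the read-back forces out the posted write */
    }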
+ */ + tp->write32 = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, ICH_WORKAROUND)) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = NULL; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->read32_mbox = tg3_read32_mbox_5906; + tp->write32_mbox = tg3_write32_mbox_5906; + tp->write32_tx_mbox = tg3_write32_mbox_5906; + tp->write32_rx_mbox = tg3_write32_mbox_5906; + } + + if (tp->write32 == tg3_write_indirect_reg32 || + (tg3_flag(tp, PCIX_MODE) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, &val); + tp->pci_fn = val & 0x7; + } else { + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + } + + /* Get eeprom hw config before calling tg3_set_power_state(). + * In particular, the TG3_FLAG_IS_NIC flag must be + * determined before calling tg3_set_power_state() so that + * we know whether or not to switch out of Vaux power. + * When the flag is set, it means that GPIO1 is used for eeprom + * write protect and also implies that it is a LOM where GPIOs + * are not used to switch power. + */ + tg3_get_eeprom_hw_cfg(tp); + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + + tg3_ape_lock_init(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); + + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. + * It is also used as eeprom write protect on LOMs. + */ + tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + /* Unused GPIO3 must be driven as output on 5752 because there + * are no pull-up resistors on unused GPIO pins. 
+ */ + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tg3_flag(tp, IS_NIC)) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + + /* Derive initial jumbo mode from MTU assigned in + * ether_setup() via the alloc_etherdev() call + */ + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); + + /* Determine WakeOnLan speed to use. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { + tg3_flag_clear(tp, WOL_SPEED_100MB); + } else { + tg3_flag_set(tp, WOL_SPEED_100MB); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->phy_flags |= TG3_PHYFLG_IS_FET; + + /* A few boards don't want Ethernet@WireSpeed phy feature */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; + + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && + !tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && + tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; + } else + tp->phy_flags |= TG3_PHYFLG_BER_BUG; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + tp->phy_otp = tg3_read_otp_phycfg(tp); + if (tp->phy_otp == 0) + tp->phy_otp = TG3_OTP_DEFAULT; + } + + if (tg3_flag(tp, CPMU_PRESENT)) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + + tp->coalesce_mode = 0; + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) + tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + + /* Set these bits to enable statistics workaround. 
*/ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_flag_set(tp, USE_PHYLIB); + + err = tg3_mdio_init(tp); + if (err) + return err; + + /* Initialize data/descriptor byte/word swapping. */ + val = tr32(GRC_MODE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + + tw32(GRC_MODE, val | tp->grc_mode); + + tg3_switch_clocks(tp); + + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { + u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); + + if (chiprevid == CHIPREV_ID_5701_A0 || + chiprevid == CHIPREV_ID_5701_B0 || + chiprevid == CHIPREV_ID_5701_B2 || + chiprevid == CHIPREV_ID_5701_B5) { + void __iomem *sram_base; + + /* Write some dummy words into the SRAM status block + * area, see if it reads back correctly. If the return + * value is bad, force enable the PCIX workaround. + */ + sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; + + writel(0x00000000, sram_base); + writel(0x00000000, sram_base + 4); + writel(0xffffffff, sram_base + 4); + if (readl(sram_base) != 0x00000000) + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + } + } + + udelay(50); + tg3_nvram_init(tp); + + grc_misc_cfg = tr32(GRC_MISC_CFG); + grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || + grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) + tg3_flag_set(tp, IS_5788); + + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + + /* Preserve the APE MAC_MODE bits */ + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = 0; + + /* these are limited to 10/100 only */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || + (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; + + err = tg3_phy_probe(tp); + if (err) { + dev_err(&tp->pdev->dev, "phy probe 
failed, err %d\n", err); + /* ... but do not return immediately ... */ + tg3_mdio_fini(tp); + } + + tg3_read_vpd(tp); + tg3_read_fw_ver(tp); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + else + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } + + /* 5700 {AX,BX} chips have a broken status block link + * change bit implementation, so we must use the + * status register in those cases. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tg3_flag_set(tp, USE_LINKCHG_REG); + else + tg3_flag_clear(tp, USE_LINKCHG_REG); + + /* The led_ctrl is set during tg3_phy_probe, here we might + * have to force the link status polling mechanism based + * upon subsystem IDs. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tg3_flag_set(tp, USE_LINKCHG_REG); + } + + /* For all SERDES we poll the MAC status register. */ + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + tg3_flag_set(tp, POLL_SERDES); + else + tg3_flag_clear(tp, POLL_SERDES); + + tp->rx_offset = NET_IP_ALIGN; + tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + tg3_flag(tp, PCIX_MODE)) { + tp->rx_offset = 0; + #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + tp->rx_copy_thresh = ~(u16)0; + #endif + } + + tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; + tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; + tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; + + tp->rx_std_max_post = tp->rx_std_ring_mask + 1; + + /* Increment the rx prod index on the rx std ring by at most + * 8 for these chips to workaround hw errata. 
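The ring masks computed a few lines up are simply the ring sizes minus one, which assumes the sizes are powers of two: wrapping a producer or consumer index then needs only an AND rather than a modulo. For example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t ring_size = 512;            /* must be a power of two */
        const uint32_t ring_mask = ring_size - 1;  /* like tp->rx_std_ring_mask */
        uint32_t idx = 510;

        for (int i = 0; i < 4; i++) {
            printf("slot %u\n", idx & ring_mask);  /* 510, 511, 0, 1 */
            idx++;
        }
        return 0;
    }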
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_std_max_post = 8; + + if (tg3_flag(tp, ASPM_WORKAROUND)) + tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & + PCIE_PWR_MGMT_L1_THRESH_MSK; + + return err; + } + + #ifdef CONFIG_SPARC + static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + struct pci_dev *pdev = tp->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == 6) { + memcpy(dev->dev_addr, addr, 6); + memcpy(dev->perm_addr, dev->dev_addr, 6); + return 0; + } + return -ENODEV; + } + + static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + + memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->perm_addr, idprom->id_ethaddr, 6); + return 0; + } + #endif + + static int __devinit tg3_get_device_address(struct tg3 *tp) + { + struct net_device *dev = tp->dev; + u32 hi, lo, mac_offset; + int addr_ok = 0; + + #ifdef CONFIG_SPARC + if (!tg3_get_macaddr_sparc(tp)) + return 0; + #endif + + mac_offset = 0x7c; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + mac_offset = 0xcc; + if (tg3_nvram_lock(tp)) + tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); + else + tg3_nvram_unlock(tp); + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) + mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mac_offset = 0x10; + + /* First try to get it from MAC address mailbox. */ + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); + if ((hi >> 16) == 0x484b) { + dev->dev_addr[0] = (hi >> 8) & 0xff; + dev->dev_addr[1] = (hi >> 0) & 0xff; + + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[5] = (lo >> 0) & 0xff; + + /* Some old bootcode may report a 0 MAC address in SRAM */ + addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + } + if (!addr_ok) { + /* Next, try NVRAM. */ + if (!tg3_flag(tp, NO_NVRAM) && + !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { + memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); + memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + } + /* Finally just fetch it out of the MAC control regs. 
*/ + else { + hi = tr32(MAC_ADDR_0_HIGH); + lo = tr32(MAC_ADDR_0_LOW); + + dev->dev_addr[5] = lo & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[1] = hi & 0xff; + dev->dev_addr[0] = (hi >> 8) & 0xff; + } + } + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { + #ifdef CONFIG_SPARC + if (!tg3_get_default_macaddr_sparc(tp)) + return 0; + #endif + return -EINVAL; + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + return 0; + } + + #define BOUNDARY_SINGLE_CACHELINE 1 + #define BOUNDARY_MULTI_CACHELINE 2 + + static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) + { + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + !tg3_flag(tp, PCI_EXPRESS)) + goto out; + + #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; + #else + #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; + #else + goal = 0; + #endif + #endif + + if (tg3_flag(tp, 57765_PLUS)) { + val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + goto out; + } + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. 
+ */ + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + } + } else if (tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + } + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + } + } + + out: + return val; + } + + static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) + { + struct tg3_internal_buffer_desc test_desc; + u32 sram_dma_descs; + int i, ret; + + sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; + + tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); + tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); + tw32(RDMAC_STATUS, 0); + tw32(WDMAC_STATUS, 0); + + tw32(BUFMGR_MODE, 0); + tw32(FTQ_RESET, 0); + + test_desc.addr_hi = ((u64) buf_dma) >> 32; + test_desc.addr_lo = buf_dma & 0xffffffff; + test_desc.nic_mbuf = 0x00002100; + test_desc.len = size; + + /* + * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz + * the *second* time the tg3 driver was getting loaded after an + * initial scan. + * + * Broadcom tells me: + * ...the DMA engine is connected to the GRC block and a DMA + * reset may affect the GRC block in some unpredictable way... + * The behavior of resets to individual blocks has not been tested. + * + * Broadcom noted the GRC reset will also reset all sub-components. 
+ */ + if (to_device) { + test_desc.cqid_sqid = (13 << 8) | 2; + + tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); + udelay(40); + } else { + test_desc.cqid_sqid = (16 << 8) | 7; + + tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); + udelay(40); + } + test_desc.flags = 0x00000005; + + for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { + u32 val; + + val = *(((u32 *)&test_desc) + i); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, + sram_dma_descs + (i * sizeof(u32))); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + } + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + + if (to_device) + tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); + else + tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); + + ret = -ENODEV; + for (i = 0; i < 40; i++) { + u32 val; + + if (to_device) + val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); + else + val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); + if ((val & 0xffff) == sram_dma_descs) { + ret = 0; + break; + } + + udelay(100); + } + + return ret; + } + + #define TEST_BUFFER_SIZE 0x2000 + + static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, + }; + + static int __devinit tg3_test_dma(struct tg3 *tp) + { + dma_addr_t buf_dma; + u32 *buf, saved_dma_rwctrl; + int ret = 0; + + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_nofree; + } + + tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | + (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); + + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); + + if (tg3_flag(tp, 57765_PLUS)) + goto out; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* DMA read watermark not used on PCIE */ + tp->dma_rwctrl |= 0x00180000; + } else if (!tg3_flag(tp, PCIX_MODE)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) + tp->dma_rwctrl |= 0x003f0000; + else + tp->dma_rwctrl |= 0x003f000f; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); + u32 read_water = 0x7; + + /* If the 5704 is behind the EPB bridge, we can + * do the less restrictive ONE_DMA workaround for + * better performance. + */ + if (tg3_flag(tp, 40BIT_DMA_BUG) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl |= 0x8000; + else if (ccval == 0x6 || ccval == 0x7) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) + read_water = 4; + /* Set bit 23 to enable PCIX hw bug fix */ + tp->dma_rwctrl |= + (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | + (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | + (1 << 23); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { + /* 5780 always in PCIX mode */ + tp->dma_rwctrl |= 0x00144000; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + /* 5714 always in PCIX mode */ + tp->dma_rwctrl |= 0x00148000; + } else { + tp->dma_rwctrl |= 0x001b000f; + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl &= 0xfffffff0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + /* Remove this if it causes problems for some boards. */ + tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; + + /* On 5700/5701 chips, we need to set this bit. 
+ * Otherwise the chip will issue cacheline transactions + * to streamable DMA memory with not all the byte + * enables turned on. This is an error on several + * RISC PCI controllers, in particular sparc64. + * + * On 5703/5704 chips, this bit has been reassigned + * a different meaning. In particular, it is used + * on those chips to enable a PCI-X workaround. + */ + tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + #if 0 + /* Unneeded, already done by tg3_get_invariants. */ + tg3_switch_clocks(tp); + #endif + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + goto out; + + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + while (1) { + u32 *p = buf, i; + + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) + p[i] = i; + + /* Send the buffer to the chip. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); + if (ret) { + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); + break; + } + + #if 0 + /* validate data reached card RAM correctly. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + u32 val; + tg3_read_mem(tp, 0x2100 + (i*4), &val); + if (le32_to_cpu(val) != p[i]) { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on device! " + "(%d != %d)\n", __func__, val, i); + /* ret = -ENODEV here? */ + } + p[i] = 0; + } + #endif + /* Now read it back. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); + if (ret) { + dev_err(&tp->pdev->dev, "%s: Buffer read failed. " + "err = %d\n", __func__, ret); + break; + } + + /* Verify it. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + if (p[i] == i) + continue; + + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + break; + } else { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); + ret = -ENODEV; + goto out; + } + } + + if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { + /* Success. */ + ret = 0; + break; + } + } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } else { + /* Safe to use the calculated DMA boundary. 
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + + out: + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); + out_nofree: + return ret; + } + + static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) + { + if (tg3_flag(tp, 57765_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_57765; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_57765; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_57765; + } else if (tg3_flag(tp, 5705_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5705; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5705; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5906; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5906; + } + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_5780; + } else { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO; + } + + tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; + tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; + } + + static char * __devinit tg3_phy_string(struct tg3 *tp) + { + switch (tp->phy_id & TG3_PHY_ID_MASK) { + case TG3_PHY_ID_BCM5400: return "5400"; + case TG3_PHY_ID_BCM5401: return "5401"; + case TG3_PHY_ID_BCM5411: return "5411"; + case TG3_PHY_ID_BCM5701: return "5701"; + case TG3_PHY_ID_BCM5703: return "5703"; + case TG3_PHY_ID_BCM5704: return "5704"; + case TG3_PHY_ID_BCM5705: return "5705"; + case TG3_PHY_ID_BCM5750: return "5750"; + case TG3_PHY_ID_BCM5752: return "5752"; + case TG3_PHY_ID_BCM5714: return "5714"; + case TG3_PHY_ID_BCM5780: return "5780"; + case TG3_PHY_ID_BCM5755: return "5755"; + case TG3_PHY_ID_BCM5787: return "5787"; + case TG3_PHY_ID_BCM5784: return "5784"; + case TG3_PHY_ID_BCM5756: return "5722/5756"; + case TG3_PHY_ID_BCM5906: return "5906"; + case TG3_PHY_ID_BCM5761: return "5761"; + case TG3_PHY_ID_BCM5718C: return "5718C"; + case TG3_PHY_ID_BCM5718S: return "5718S"; + case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM8002: return "8002/serdes"; + case 0: return "serdes"; + default: return "unknown"; + } + } + + static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) + { + if (tg3_flag(tp, PCI_EXPRESS)) { + strcpy(str, "PCI Express"); + return str; + } else if (tg3_flag(tp, PCIX_MODE)) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + 
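/* A minimal sketch, for illustration only, of the clock_ctrl -> PCI-X bus
 * speed encoding that tg3_bus_string() decodes just below (0 -> 33MHz,
 * 2 -> 50MHz, 4 -> 66MHz, 6 -> 100MHz, 7 -> 133MHz).  The helper name is
 * hypothetical and not part of the driver; the GRC_MISC_CFG 5704CIOBE
 * board-ID override that also means 133MHz is left to the caller, exactly
 * as the code below handles it.
 */
static const char *pcix_speed_str(u32 clock_ctrl)
{
	switch (clock_ctrl) {
	case 0:	return "33MHz";
	case 2:	return "50MHz";
	case 4:	return "66MHz";
	case 6:	return "100MHz";
	case 7:	return "133MHz";
	default:
		return "";	/* reserved encoding: append no speed string */
	}
}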
strcpy(str, "PCIX:"); + + if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + } else { + strcpy(str, "PCI:"); + if (tg3_flag(tp, PCI_HIGH_SPEED)) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tg3_flag(tp, PCI_32BIT)) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; + } + + static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) + { + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; + + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr | func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. + */ + if (!peer) { + peer = tp->pdev; + return peer; + } + + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other + */ + pci_dev_put(peer); + + return peer; + } + + static void __devinit tg3_init_coal(struct tg3 *tp) + { + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } + + if (tg3_flag(tp, 5705_PLUS)) { + ec->rx_coalesce_usecs_irq = 0; + ec->tx_coalesce_usecs_irq = 0; + ec->stats_block_coalesce_usecs = 0; + } + } + + static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, + #endif + }; + + static int __devinit tg3_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct net_device *dev; + struct tg3 *tp; + int i, err, pm_cap; + u32 sndmbx, rcvmbx, intmbx; + char str[40]; + u64 dma_mask, persist_dma_mask; + u32 features = 0; + + printk_once(KERN_INFO "%s\n", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + 
pci_set_master(pdev); + + /* Find power-management capability. */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find Power Management capability, aborting\n"); + err = -EIO; + goto err_out_free_res; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); + goto err_out_free_res; + } + + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); + if (!dev) { + dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto err_out_power_down; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pdev = pdev; + tp->dev = dev; + tp->pm_cap = pm_cap; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; + else + tp->msg_enable = TG3_DEF_MSG_ENABLE; + + /* The word/byte swap controls here control register access byte + * swapping. DMA data byte swapping is controlled in the GRC_MODE + * setting below. + */ + tp->misc_host_ctrl = + MISC_HOST_CTRL_MASK_PCI_INT | + MISC_HOST_CTRL_WORD_SWAP | + MISC_HOST_CTRL_INDIR_ACCESS | + MISC_HOST_CTRL_PCISTATE_RW; + + /* The NONFRM (non-frame) byte/word swap controls take effect + * on descriptor entries, anything which isn't packet data. + * + * The StrongARM chips on the board (one for tx, one for rx) + * are running in big-endian mode. + */ + tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | + GRC_MODE_WSWAP_NONFRM_DATA); + #ifdef __BIG_ENDIAN + tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; + #endif + spin_lock_init(&tp->lock); + spin_lock_init(&tp->indirect_lock); + INIT_WORK(&tp->reset_task, tg3_reset_task); + + tp->regs = pci_ioremap_bar(pdev, BAR_0); + if (!tp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } + + tp->rx_pending = TG3_DEF_RX_RING_PENDING; + tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + + dev->ethtool_ops = &tg3_ethtool_ops; + dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; + dev->irq = pdev->irq; + + err = tg3_get_invariants(tp); + if (err) { + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_apeunmap; + } + + /* The EPB bridge inside 5714, 5715, and 5780 and any + * device behind the EPB cannot support DMA addresses > 40-bit. + * On 64-bit systems with IOMMU, use 40-bit dma_mask. + * On 64-bit systems without IOMMU, use 64-bit dma_mask and + * do DMA address check in tg3_start_xmit(). + */ + if (tg3_flag(tp, IS_5788)) + persist_dma_mask = dma_mask = DMA_BIT_MASK(32); + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); + #ifdef CONFIG_HIGHMEM + dma_mask = DMA_BIT_MASK(64); + #endif + } else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. 
*/ + if (dma_mask > DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, + persist_dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "Unable to obtain 64 bit " + "DMA for consistent allocations\n"); + goto err_out_apeunmap; + } + } + } + if (err || dma_mask == DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_out_apeunmap; + } + } + + tg3_init_bufmgr_config(tp); + + features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + /* 5700 B0 chips do not support checksumming correctly due + * to hardware bugs. + */ + if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { + features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_IPV6_CSUM; + } + + /* TSO is on by default on chips that support hardware TSO. + * Firmware TSO on older chips gives lower performance, so it + * is off by default, but can be enabled using ethtool. + */ + if ((tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) && + (features & NETIF_F_IP_CSUM)) + features |= NETIF_F_TSO; + if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { + if (features & NETIF_F_IPV6_CSUM) + features |= NETIF_F_TSO6; + if (tg3_flag(tp, HW_TSO_3) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + features |= NETIF_F_TSO_ECN; + } + + dev->features |= features; + dev->vlan_features |= features; + + /* + * Add loopback capability only for a subset of devices that support + * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY + * loopback for the remaining devices. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) + /* Add the loopback capability */ + features |= NETIF_F_LOOPBACK; + + dev->hw_features |= features; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && + !tg3_flag(tp, TSO_CAPABLE) && + !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { + tg3_flag_set(tp, MAX_RXPEND_64); + tp->rx_pending = 63; + } + + err = tg3_get_device_address(tp); + if (err) { + dev_err(&pdev->dev, + "Could not obtain valid ethernet address, aborting\n"); + goto err_out_apeunmap; + } + + /* + * Reset chip in case UNDI or EFI driver did not shutdown + * DMA self test will enable WDMAC and we'll see (spurious) + * pending DMA on the PCI bus at that point. 
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i <= 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + tg3_init_coal(tp); + + pci_set_drvdata(pdev, dev); + + if (tg3_flag(tp, 5717_PLUS)) { + /* Resume a low-power mode */ + tg3_frob_aux_power(tp, false); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_apeunmap; + } + + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", + tp->board_part_number, + tp->pci_chip_rev_id, + tg3_bus_string(tp, str), + dev->dev_addr); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + struct phy_device *phydev; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } + + netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); + netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", + tp->dma_rwctrl, + pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : + ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); + + pci_save_state(pdev); + + return 0; + + err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + + err_out_iounmap: + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + + err_out_free_dev: + free_netdev(dev); + + err_out_power_down: + pci_set_power_state(pdev, PCI_D3hot); + + err_out_free_res: + pci_release_regions(pdev); + + err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; + } + + static void __devexit tg3_remove_one(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct tg3 *tp = netdev_priv(dev); + + if (tp->fw) + release_firmware(tp->fw); + + cancel_work_sync(&tp->reset_task); + - if (!tg3_flag(tp, USE_PHYLIB)) { ++ if (tg3_flag(tp, USE_PHYLIB)) { + tg3_phy_fini(tp); + tg3_mdio_fini(tp); + } + + unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } + } + + #ifdef CONFIG_PM_SLEEP + static int tg3_suspend(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + flush_work_sync(&tp->reset_task); + tg3_phy_stop(tp); + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + + tg3_full_lock(tp, 1); + tg3_disable_ints(tp); + tg3_full_unlock(tp); + + netif_device_detach(dev); + + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_flag_clear(tp, INIT_COMPLETE); + tg3_full_unlock(tp); + + err = tg3_power_down_prepare(tp); + if (err) { + int err2; + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (err2) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + netif_device_attach(dev); + tg3_netif_start(tp); + + out: + tg3_full_unlock(tp); + + if (!err2) + tg3_phy_start(tp); + } + + return err; + } + + static int tg3_resume(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + netif_device_attach(dev); + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + if (err) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; + } + + static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); + #define TG3_PM_OPS (&tg3_pm_ops) + + #else + + #define TG3_PM_OPS NULL + + #endif /* CONFIG_PM_SLEEP */ + + /** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ + static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + tg3_flag_clear(tp, RESTART_TIMER); + + /* Want to make sure that the reset task doesn't run */ + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + tg3_flag_clear(tp, RESTART_TIMER); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + + done: + if (state == pci_channel_io_perm_failure) + err = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return err; + } + + /** + * tg3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ + static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) + goto done; + + rc = PCI_ERS_RESULT_RECOVERED; + + done: + rtnl_unlock(); + + return rc; + } + + /** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ + static void tg3_io_resume(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + + done: + rtnl_unlock(); + } + + static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume + }; + + static struct pci_driver tg3_driver = { + .name = DRV_MODULE_NAME, + .id_table = tg3_pci_tbl, + .probe = tg3_init_one, + .remove = __devexit_p(tg3_remove_one), + .err_handler = &tg3_err_handler, + .driver.pm = TG3_PM_OPS, + }; + + static int __init tg3_init(void) + { + return pci_register_driver(&tg3_driver); + } + + static void __exit tg3_cleanup(void) + { + pci_unregister_driver(&tg3_driver); + } + + module_init(tg3_init); + module_exit(tg3_cleanup); diff --combined drivers/net/ethernet/cadence/at91_ether.c index 0000000,1b0ba8c..56624d3 mode 000000,100644..100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@@ -1,0 -1,1254 +1,1254 @@@ + /* + * Ethernet driver for the Atmel AT91RM9200 (Thunder) + * + * Copyright (C) 2003 SAN People (Pty) Ltd + * + * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. + * Initial version by Rick Bronson 01/11/2003 + * + * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker + * (Polaroid Corporation) + * + * Realtek RTL8201(B)L PHY support by Roman Avramenko roman@imsystems.ru + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + + #include <linux/module.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/mii.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/skbuff.h> + #include <linux/dma-mapping.h> + #include <linux/ethtool.h> + #include <linux/platform_device.h> + #include <linux/clk.h> + #include <linux/gfp.h> + + #include <asm/io.h> + #include <asm/uaccess.h> + #include <asm/mach-types.h> + + #include <mach/at91rm9200_emac.h> -#include <mach/gpio.h> ++#include <asm/gpio.h> + #include <mach/board.h> + + #include "at91_ether.h" + + #define DRV_NAME "at91_ether" + #define DRV_VERSION "1.0" + + #define LINK_POLL_INTERVAL (HZ) + + /* ..................................................................... */ + + /* + * Read from a EMAC register. + */ + static inline unsigned long at91_emac_read(unsigned int reg) + { + void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; + + return __raw_readl(emac_base + reg); + } + + /* + * Write to a EMAC register. + */ + static inline void at91_emac_write(unsigned int reg, unsigned long value) + { + void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; + + __raw_writel(value, emac_base + reg); + } + + /* ........................... PHY INTERFACE ........................... 
*/ + + /* + * Enable the MDIO bit in MAC control register + * When not called from an interrupt-handler, access to the PHY must be + * protected by a spinlock. + */ + static void enable_mdi(void) + { + unsigned long ctl; + + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ + } + + /* + * Disable the MDIO bit in the MAC control register + */ + static void disable_mdi(void) + { + unsigned long ctl; + + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ + } + + /* + * Wait until the PHY operation is complete. + */ + static inline void at91_phy_wait(void) { + unsigned long timeout = jiffies + 2; + + while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { + if (time_after(jiffies, timeout)) { + printk("at91_ether: MIO timeout\n"); + break; + } + cpu_relax(); + } + } + + /* + * Write value to the a PHY register + * Note: MDI interface is assumed to already have been enabled. + */ + static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value) + { + at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W + | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA)); + + /* Wait until IDLE bit in Network Status register is cleared */ + at91_phy_wait(); + } + + /* + * Read value stored in a PHY register. + * Note: MDI interface is assumed to already have been enabled. + */ + static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value) + { + at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R + | ((phy_addr & 0x1f) << 23) | (address << 18)); + + /* Wait until IDLE bit in Network Status register is cleared */ + at91_phy_wait(); + + *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA; + } + + /* ........................... PHY MANAGEMENT .......................... */ + + /* + * Access the PHY to determine the current link speed and mode, and update the + * MAC accordingly. + * If no link or auto-negotiation is busy, then no changes are made. + */ + static void update_linkspeed(struct net_device *dev, int silent) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int bmsr, bmcr, lpa, mac_cfg; + unsigned int speed, duplex; + + if (!mii_link_ok(&lp->mii)) { /* no link */ + netif_carrier_off(dev); + if (!silent) + printk(KERN_INFO "%s: Link down.\n", dev->name); + return; + } + + /* Link up, or auto-negotiation still in progress */ + read_phy(lp->phy_address, MII_BMSR, &bmsr); + read_phy(lp->phy_address, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */ + if (!(bmsr & BMSR_ANEGCOMPLETE)) + return; /* Do nothing - another interrupt generated when negotiation complete */ + + read_phy(lp->phy_address, MII_LPA, &lpa); + if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; + else speed = SPEED_10; + if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; + else duplex = DUPLEX_HALF; + } else { + speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + } + + /* Update the MAC */ + mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); + if (speed == SPEED_100) { + if (duplex == DUPLEX_FULL) /* 100 Full Duplex */ + mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD; + else /* 100 Half Duplex */ + mac_cfg |= AT91_EMAC_SPD; + } else { + if (duplex == DUPLEX_FULL) /* 10 Full Duplex */ + mac_cfg |= AT91_EMAC_FD; + else {} /* 10 Half Duplex */ + } + at91_emac_write(AT91_EMAC_CFG, mac_cfg); + + if (!silent) + printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); + netif_carrier_on(dev); + } + + /* + * Handle interrupts from the PHY + */ + static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + unsigned int phy; + + /* + * This hander is triggered on both edges, but the PHY chips expect + * level-triggering. We therefore have to check if the PHY actually has + * an IRQ pending. + */ + enable_mdi(); + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { + read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ + if (!(phy & (1 << 0))) + goto done; + } + else if (lp->phy_type == MII_LXT971A_ID) { + read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ + if (!(phy & (1 << 2))) + goto done; + } + else if (lp->phy_type == MII_BCM5221_ID) { + read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ + if (!(phy & (1 << 0))) + goto done; + } + else if (lp->phy_type == MII_KS8721_ID) { + read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ + if (!(phy & ((1 << 2) | 1))) + goto done; + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy); + if (!(phy & ((1 << 2) | 1))) + goto done; + } + else if (lp->phy_type == MII_DP83848_ID) { + read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ + if (!(phy & (1 << 7))) + goto done; + } + + update_linkspeed(dev, 0); + + done: + disable_mdi(); + + return IRQ_HANDLED; + } + + /* + * Initialize and enable the PHY interrupt for link-state changes + */ + static void enable_phyirq(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int dsintr, irq_number; + int status; + + irq_number = lp->board_data.phy_irq_pin; + if (!irq_number) { + /* + * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L), + * or board does not have it connected. 
+ */ + mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); + return; + } + + status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev); + if (status) { + printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status); + return; + } + + spin_lock_irq(&lp->lock); + enable_mdi(); + + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ + read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + dsintr = dsintr & ~0xf00; /* clear bits 8..11 */ + write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + } + else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ + read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */ + write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + } + else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ + dsintr = (1 << 15) | ( 1 << 14); + write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + } + else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ + dsintr = (1 << 10) | ( 1 << 8); + write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + dsintr = dsintr | 0x500; /* set bits 8, 10 */ + write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + } + else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ + read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + dsintr = dsintr | 0x3c; /* set bits 2..5 */ + write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); + read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + dsintr = dsintr | 0x3; /* set bits 0,1 */ + write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); + } + + disable_mdi(); + spin_unlock_irq(&lp->lock); + } + + /* + * Disable the PHY interrupt + */ + static void disable_phyirq(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int dsintr; + unsigned int irq_number; + + irq_number = lp->board_data.phy_irq_pin; + if (!irq_number) { + del_timer_sync(&lp->check_timer); + return; + } + + spin_lock_irq(&lp->lock); + enable_mdi(); + + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ + read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + dsintr = dsintr | 0xf00; /* set bits 8..11 */ + write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + } + else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ + read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */ + write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + } + else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ + read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr); + dsintr = ~(1 << 14); + write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + } + else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ + read_phy(lp->phy_address, MII_TPISTATUS, &dsintr); + dsintr = ~((1 << 10) | (1 << 8)); + write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + } + else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ + read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + dsintr = dsintr & ~0x500; /* clear bits 8, 10 */ + write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + } + else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ + read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + dsintr = dsintr & ~0x3; /* clear bits 0, 1 */ + write_phy(lp->phy_address, 
MII_DPMICR_REG, dsintr); + read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + dsintr = dsintr & ~0x3c; /* clear bits 2..5 */ + write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); + } + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + free_irq(irq_number, dev); /* Free interrupt handler */ + } + + /* + * Perform a software reset of the PHY. + */ + #if 0 + static void reset_phy(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned int bmcr; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + /* Perform PHY reset */ + write_phy(lp->phy_address, MII_BMCR, BMCR_RESET); + + /* Wait until PHY reset is complete */ + do { + read_phy(lp->phy_address, MII_BMCR, &bmcr); + } while (!(bmcr & BMCR_RESET)); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + } + #endif + + static void at91ether_check_link(unsigned long dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + + enable_mdi(); + update_linkspeed(dev, 1); + disable_mdi(); + + mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); + } + + /* ......................... ADDRESS MANAGEMENT ........................ */ + + /* + * NOTE: Your bootloader must always set the MAC address correctly before + * booting into Linux. + * + * - It must always set the MAC address after reset, even if it doesn't + * happen to access the Ethernet while it's booting. Some versions of + * U-Boot on the AT91RM9200-DK do not do this. + * + * - Likewise it must store the addresses in the correct byte order. + * MicroMonitor (uMon) on the CSB337 does this incorrectly (and + * continues to do so, for bug-compatibility). + */ + + static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo) + { + char addr[6]; + + if (machine_is_csb337()) { + addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */ + addr[4] = (lo & 0xff00) >> 8; + addr[3] = (lo & 0xff0000) >> 16; + addr[2] = (lo & 0xff000000) >> 24; + addr[1] = (hi & 0xff); + addr[0] = (hi & 0xff00) >> 8; + } + else { + addr[0] = (lo & 0xff); + addr[1] = (lo & 0xff00) >> 8; + addr[2] = (lo & 0xff0000) >> 16; + addr[3] = (lo & 0xff000000) >> 24; + addr[4] = (hi & 0xff); + addr[5] = (hi & 0xff00) >> 8; + } + + if (is_valid_ether_addr(addr)) { + memcpy(dev->dev_addr, &addr, 6); + return 1; + } + return 0; + } + + /* + * Set the ethernet MAC address in dev->dev_addr + */ + static void __init get_mac_address(struct net_device *dev) + { + /* Check Specific-Address 1 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L))) + return; + /* Check Specific-Address 2 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L))) + return; + /* Check Specific-Address 3 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L))) + return; + /* Check Specific-Address 4 */ + if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L))) + return; + + printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n"); + } + + /* + * Program the hardware MAC address from dev->dev_addr. 
+ */ + static void update_mac_address(struct net_device *dev) + { + at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); + at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4])); + + at91_emac_write(AT91_EMAC_SA2L, 0); + at91_emac_write(AT91_EMAC_SA2H, 0); + } + + /* + * Store the new hardware address in dev->dev_addr, and update the MAC. + */ + static int set_mac_address(struct net_device *dev, void* addr) + { + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + update_mac_address(dev); + + printk("%s: Setting MAC address to %pM\n", dev->name, + dev->dev_addr); + + return 0; + } + + static int inline hash_bit_value(int bitnr, __u8 *addr) + { + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; + } + + /* + * The hash address register is 64 bits long and takes up two locations in the memory map. + * The least significant bits are stored in EMAC_HSL and the most significant + * bits in EMAC_HSH. + * + * The unicast hash enable and the multicast hash enable bits in the network configuration + * register enable the reception of hash matched frames. The destination address is + * reduced to a 6 bit index into the 64 bit hash register using the following hash function. + * The hash function is an exclusive or of every sixth bit of the destination address. + * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] + * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] + * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] + * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] + * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] + * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] + * da[0] represents the least significant bit of the first byte received, that is, the multicast/ + * unicast indicator, and da[47] represents the most significant bit of the last byte + * received. + * If the hash index points to a bit that is set in the hash register then the frame will be + * matched according to whether the frame is multicast or unicast. + * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and + * the hash index points to a bit set in the hash register. + * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the + * hash index points to a bit set in the hash register. + * To receive all multicast frames, the hash register should be set with all ones and the + * multicast hash enable bit should be set in the network configuration register. + */ + + /* + * Return the hash index value for the specified address. + */ + static int hash_get_index(__u8 *addr) + { + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i*6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; + } + + /* + * Add multicast addresses to the internal multicast-hash table. 
+ */ + static void at91ether_sethashtable(struct net_device *dev) + { + struct netdev_hw_addr *ha; + unsigned long mc_filter[2]; + unsigned int bitnr; + + mc_filter[0] = mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + at91_emac_write(AT91_EMAC_HSL, mc_filter[0]); + at91_emac_write(AT91_EMAC_HSH, mc_filter[1]); + } + + /* + * Enable/Disable promiscuous and multicast modes. + */ + static void at91ether_set_multicast_list(struct net_device *dev) + { + unsigned long cfg; + + cfg = at91_emac_read(AT91_EMAC_CFG); + + if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ + cfg |= AT91_EMAC_CAF; + else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */ + cfg &= ~AT91_EMAC_CAF; + + if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ + at91_emac_write(AT91_EMAC_HSH, -1); + at91_emac_write(AT91_EMAC_HSL, -1); + cfg |= AT91_EMAC_MTI; + } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ + at91ether_sethashtable(dev); + cfg |= AT91_EMAC_MTI; + } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ + at91_emac_write(AT91_EMAC_HSH, 0); + at91_emac_write(AT91_EMAC_HSL, 0); + cfg &= ~AT91_EMAC_MTI; + } + + at91_emac_write(AT91_EMAC_CFG, cfg); + } + + /* ......................... ETHTOOL SUPPORT ........................... */ + + static int mdio_read(struct net_device *dev, int phy_id, int location) + { + unsigned int value; + + read_phy(phy_id, location, &value); + return value; + } + + static void mdio_write(struct net_device *dev, int phy_id, int location, int value) + { + write_phy(phy_id, location, value); + } + + static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_ethtool_gset(&lp->mii, cmd); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ + cmd->supported = SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + return ret; + } + + static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_ethtool_sset(&lp->mii, cmd); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return ret; + } + + static int at91ether_nwayreset(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + enable_mdi(); + + ret = mii_nway_restart(&lp->mii); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return ret; + } + + static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) + { + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); + } + + static const struct ethtool_ops at91ether_ethtool_ops = { + .get_settings = at91ether_get_settings, + .set_settings = at91ether_set_settings, + .get_drvinfo = at91ether_get_drvinfo, + .nway_reset = at91ether_nwayreset, + .get_link = ethtool_op_get_link, + }; + + static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { + struct at91_private *lp = netdev_priv(dev); + int res; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&lp->lock); + enable_mdi(); + res = 
generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); + disable_mdi(); + spin_unlock_irq(&lp->lock); + + return res; + } + + /* ................................ MAC ................................ */ + + /* + * Initialize and start the Receiver and Transmit subsystems + */ + static void at91ether_start(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + struct recv_desc_bufs *dlist, *dlist_phys; + int i; + unsigned long ctl; + + dlist = lp->dlist; + dlist_phys = lp->dlist_phys; + + for (i = 0; i < MAX_RX_DESCR; i++) { + dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0]; + dlist->descriptors[i].size = 0; + } + + /* Set the Wrap bit on the last descriptor */ + dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP; + + /* Reset buffer index */ + lp->rxBuffIndex = 0; + + /* Program address of descriptor list in Rx Buffer Queue register */ + at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys); + + /* Enable Receive and Transmit */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); + } + + /* + * Open the ethernet interface + */ + static int at91ether_open(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned long ctl; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EADDRNOTAVAIL; + + clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */ + + /* Clear internal statistics */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); + + /* Update the MAC address (incase user has changed it) */ + update_mac_address(dev); + + /* Enable PHY interrupt */ + enable_phyirq(dev); + + /* Enable MAC interrupts */ + at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA + | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM + | AT91_EMAC_ROVR | AT91_EMAC_ABT); + + /* Determine current link speed */ + spin_lock_irq(&lp->lock); + enable_mdi(); + update_linkspeed(dev, 0); + disable_mdi(); + spin_unlock_irq(&lp->lock); + + at91ether_start(dev); + netif_start_queue(dev); + return 0; + } + + /* + * Close the interface + */ + static int at91ether_close(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + unsigned long ctl; + + /* Disable Receiver and Transmitter */ + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); + + /* Disable PHY interrupt */ + disable_phyirq(dev); + + /* Disable MAC interrupts */ + at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA + | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM + | AT91_EMAC_ROVR | AT91_EMAC_ABT); + + netif_stop_queue(dev); + + clk_disable(lp->ether_clk); /* Disable Peripheral clock */ + + return 0; + } + + /* + * Transmit packet. 
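+ *
+ * The EMAC transmits from a single frame slot (the TAR/TCR register
+ * pair), so the queue is stopped as soon as one frame has been queued
+ * here and is only woken again from the TCOM (transmit complete)
+ * interrupt, after the DMA mapping of the previous skb has been
+ * released.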
+ */ + static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + + if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { + netif_stop_queue(dev); + + /* Store packet information (to free when Tx completed) */ + lp->skb = skb; + lp->skb_length = skb->len; + lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); + dev->stats.tx_bytes += skb->len; + + /* Set address of the data in the Transmit Address register */ + at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); + /* Set length of the packet in the Transmit Control register */ + at91_emac_write(AT91_EMAC_TCR, skb->len); + + } else { + printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); + return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) + on this skb, he also reports -ENETDOWN and printk's, so either + we free and return(0) or don't free and return 1 */ + } + + return NETDEV_TX_OK; + } + + /* + * Update the current statistics from the internal statistics registers. + */ + static struct net_device_stats *at91ether_stats(struct net_device *dev) + { + int ale, lenerr, seqe, lcol, ecol; + + if (netif_running(dev)) { + dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ + ale = at91_emac_read(AT91_EMAC_ALE); + dev->stats.rx_frame_errors += ale; /* Alignment errors */ + lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); + dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ + seqe = at91_emac_read(AT91_EMAC_SEQE); + dev->stats.rx_crc_errors += seqe; /* CRC error */ + dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ + dev->stats.rx_errors += (ale + lenerr + seqe + + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); + + dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ + dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ + dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ + dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ + + lcol = at91_emac_read(AT91_EMAC_LCOL); + ecol = at91_emac_read(AT91_EMAC_ECOL); + dev->stats.tx_window_errors += lcol; /* Late collisions */ + dev->stats.tx_aborted_errors += ecol; /* 16 collisions */ + + dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); + } + return &dev->stats; + } + + /* + * Extract received frame from buffer descriptors and sent to upper layers. 
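+ * Each descriptor whose EMAC_DESC_DONE bit is set is copied into a
+ * freshly allocated skb (the 2-byte skb_reserve keeps the IP header
+ * word-aligned), the DONE bit is then cleared to hand the buffer back
+ * to the MAC, and the ring index wraps after MAX_RX_DESCR entries.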
+ * (Called from interrupt context) + */ + static void at91ether_rx(struct net_device *dev) + { + struct at91_private *lp = netdev_priv(dev); + struct recv_desc_bufs *dlist; + unsigned char *p_recv; + struct sk_buff *skb; + unsigned int pktlen; + + dlist = lp->dlist; + while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) { + p_recv = dlist->recv_buf[lp->rxBuffIndex]; + pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */ + skb = dev_alloc_skb(pktlen + 2); + if (skb != NULL) { + skb_reserve(skb, 2); + memcpy(skb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_bytes += pktlen; + netif_rx(skb); + } + else { + dev->stats.rx_dropped += 1; + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); + } + + if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) + dev->stats.multicast++; + + dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ + if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ + lp->rxBuffIndex = 0; + else + lp->rxBuffIndex++; + } + } + + /* + * MAC interrupt handler + */ + static irqreturn_t at91ether_interrupt(int irq, void *dev_id) + { + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + unsigned long intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + It is automatically cleared once read. */ + intstatus = at91_emac_read(AT91_EMAC_ISR); + + if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ + at91ether_rx(dev); + + if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ + /* The TCOM bit is set even if the transmission failed. */ + if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) + dev->stats.tx_errors += 1; + + if (lp->skb) { + dev_kfree_skb_irq(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); + } + netif_wake_queue(dev); + } + + /* Work-around for Errata #11 */ + if (intstatus & AT91_EMAC_RBNA) { + ctl = at91_emac_read(AT91_EMAC_CTL); + at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); + at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE); + } + + if (intstatus & AT91_EMAC_ROVR) + printk("%s: ROVR error\n", dev->name); + + return IRQ_HANDLED; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + static void at91ether_poll_controller(struct net_device *dev) + { + unsigned long flags; + + local_irq_save(flags); + at91ether_interrupt(dev->irq, dev); + local_irq_restore(flags); + } + #endif + + static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = at91ether_stats, + .ndo_set_rx_mode = at91ether_set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_do_ioctl = at91ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, + #endif + }; + + /* + * Initialize the ethernet interface + */ + static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, + struct platform_device *pdev, struct clk *ether_clk) + { + struct at91_eth_data *board_data = pdev->dev.platform_data; + struct net_device *dev; + struct at91_private *lp; + unsigned int val; + int res; + + dev = alloc_etherdev(sizeof(struct at91_private)); + if (!dev) + return -ENOMEM; + + dev->base_addr = AT91_VA_BASE_EMAC; + 
dev->irq = AT91RM9200_ID_EMAC; + + /* Install the interrupt handler */ + if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { + free_netdev(dev); + return -EBUSY; + } + + /* Allocate memory for DMA Receive descriptors */ + lp = netdev_priv(dev); + lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL); + if (lp->dlist == NULL) { + free_irq(dev->irq, dev); + free_netdev(dev); + return -ENOMEM; + } + lp->board_data = *board_data; + lp->ether_clk = ether_clk; + platform_set_drvdata(pdev, dev); + + spin_lock_init(&lp->lock); + + ether_setup(dev); + dev->netdev_ops = &at91ether_netdev_ops; + dev->ethtool_ops = &at91ether_ethtool_ops; + + SET_NETDEV_DEV(dev, &pdev->dev); + + get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ + update_mac_address(dev); /* Program ethernet address into MAC */ + + at91_emac_write(AT91_EMAC_CTL, 0); + + if (lp->board_data.is_rmii) + at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); + else + at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); + + /* Perform PHY-specific initialization */ + spin_lock_irq(&lp->lock); + enable_mdi(); + if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { + read_phy(phy_address, MII_DSCR_REG, &val); + if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ + lp->phy_media = PORT_FIBRE; + } else if (machine_is_csb337()) { + /* mix link activity status into LED2 link state */ + write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22); + } else if (machine_is_ecbat91()) + write_phy(phy_address, MII_LEDCTRL_REG, 0x156A); + + disable_mdi(); + spin_unlock_irq(&lp->lock); + + lp->mii.dev = dev; /* Support for ethtool */ + lp->mii.mdio_read = mdio_read; + lp->mii.mdio_write = mdio_write; + lp->mii.phy_id = phy_address; + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + + lp->phy_type = phy_type; /* Type of PHY connected */ + lp->phy_address = phy_address; /* MDI address of PHY */ + + /* Register the network interface */ + res = register_netdev(dev); + if (res) { + free_irq(dev->irq, dev); + free_netdev(dev); + dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); + return res; + } + + /* Determine current link speed */ + spin_lock_irq(&lp->lock); + enable_mdi(); + update_linkspeed(dev, 0); + disable_mdi(); + spin_unlock_irq(&lp->lock); + netif_carrier_off(dev); /* will be enabled in open() */ + + /* If board has no PHY IRQ, use a timer to poll the PHY */ + if (!lp->board_data.phy_irq_pin) { + init_timer(&lp->check_timer); + lp->check_timer.data = (unsigned long)dev; + lp->check_timer.function = at91ether_check_link; + } else if (lp->board_data.phy_irq_pin >= 32) + gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy"); + + /* Display ethernet banner */ + printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", + dev->name, (uint) dev->base_addr, dev->irq, + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", + dev->dev_addr); + if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) + printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? 
"(Fiber)" : "(Copper)"); + else if (phy_type == MII_LXT971A_ID) + printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name); + else if (phy_type == MII_RTL8201_ID) + printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name); + else if (phy_type == MII_BCM5221_ID) + printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name); + else if (phy_type == MII_DP83847_ID) + printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name); + else if (phy_type == MII_DP83848_ID) + printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name); + else if (phy_type == MII_AC101L_ID) + printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name); + else if (phy_type == MII_KS8721_ID) + printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name); + else if (phy_type == MII_T78Q21x3_ID) + printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name); + else if (phy_type == MII_LAN83C185_ID) + printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name); + + return 0; + } + + /* + * Detect MAC and PHY and perform initialization + */ + static int __init at91ether_probe(struct platform_device *pdev) + { + unsigned int phyid1, phyid2; + int detected = -1; + unsigned long phy_id; + unsigned short phy_address = 0; + struct clk *ether_clk; + + ether_clk = clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(ether_clk)) { + printk(KERN_ERR "at91_ether: no clock defined\n"); + return -ENODEV; + } + clk_enable(ether_clk); /* Enable Peripheral clock */ + + while ((detected != 0) && (phy_address < 32)) { + /* Read the PHY ID registers */ + enable_mdi(); + read_phy(phy_address, MII_PHYSID1, &phyid1); + read_phy(phy_address, MII_PHYSID2, &phyid2); + disable_mdi(); + + phy_id = (phyid1 << 16) | (phyid2 & 0xfff0); + switch (phy_id) { + case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */ + case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */ + case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */ + case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */ + case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */ + case MII_DP83847_ID: /* National Semiconductor DP83847: */ + case MII_DP83848_ID: /* National Semiconductor DP83848: */ + case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */ + case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */ + case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */ + case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */ + detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk); + break; + } + + phy_address++; + } + + clk_disable(ether_clk); /* Disable Peripheral clock */ + + return detected; + } + + static int __devexit at91ether_remove(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(dev); + + if (lp->board_data.phy_irq_pin >= 32) + gpio_free(lp->board_data.phy_irq_pin); + + unregister_netdev(dev); + free_irq(dev->irq, dev); + dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); + clk_put(lp->ether_clk); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; + } + + #ifdef CONFIG_PM + + static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) + { + struct net_device *net_dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(net_dev); + int phy_irq = lp->board_data.phy_irq_pin; + + if (netif_running(net_dev)) { + if (phy_irq) + 
disable_irq(phy_irq); + + netif_stop_queue(net_dev); + netif_device_detach(net_dev); + + clk_disable(lp->ether_clk); + } + return 0; + } + + static int at91ether_resume(struct platform_device *pdev) + { + struct net_device *net_dev = platform_get_drvdata(pdev); + struct at91_private *lp = netdev_priv(net_dev); + int phy_irq = lp->board_data.phy_irq_pin; + + if (netif_running(net_dev)) { + clk_enable(lp->ether_clk); + + netif_device_attach(net_dev); + netif_start_queue(net_dev); + + if (phy_irq) + enable_irq(phy_irq); + } + return 0; + } + + #else + #define at91ether_suspend NULL + #define at91ether_resume NULL + #endif + + static struct platform_driver at91ether_driver = { + .remove = __devexit_p(at91ether_remove), + .suspend = at91ether_suspend, + .resume = at91ether_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + }; + + static int __init at91ether_init(void) + { + return platform_driver_probe(&at91ether_driver, at91ether_probe); + } + + static void __exit at91ether_exit(void) + { + platform_driver_unregister(&at91ether_driver); + } + + module_init(at91ether_init) + module_exit(at91ether_exit) + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); + MODULE_AUTHOR("Andrew Victor"); + MODULE_ALIAS("platform:" DRV_NAME); diff --combined drivers/net/ethernet/jme.c index 0000000,7a0c746f..7becff1 mode 000000,100644..100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@@ -1,0 -1,3236 +1,3242 @@@ + /* + * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver + * + * Copyright 2008 JMicron Technology Corporation + * http://www.jmicron.com/ + * Copyright (c) 2009 - 2010 Guo-Fu Tseng cooldavid@cooldavid.org + * + * Author: Guo-Fu Tseng cooldavid@cooldavid.org + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/crc32.h> + #include <linux/delay.h> + #include <linux/spinlock.h> + #include <linux/in.h> + #include <linux/ip.h> + #include <linux/ipv6.h> + #include <linux/tcp.h> + #include <linux/udp.h> + #include <linux/if_vlan.h> + #include <linux/slab.h> + #include <net/ip6_checksum.h> + #include "jme.h" + + static int force_pseudohp = -1; + static int no_pseudohp = -1; + static int no_extplug = -1; + module_param(force_pseudohp, int, 0); + MODULE_PARM_DESC(force_pseudohp, + "Enable pseudo hot-plug feature manually by driver instead of BIOS."); + module_param(no_pseudohp, int, 0); + MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); + module_param(no_extplug, int, 0); + MODULE_PARM_DESC(no_extplug, + "Do not use external plug signal for pseudo hot-plug."); + + static int + jme_mdio_read(struct net_device *netdev, int phy, int reg) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, val, again = (reg == MII_BMSR) ? 1 : 0; + + read_again: + jwrite32(jme, JME_SMI, SMI_OP_REQ | + smi_phy_addr(phy) | + smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + val = jread32(jme, JME_SMI); + if ((val & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) { + pr_err("phy(%d) read timeout : %d\n", phy, reg); + return 0; + } + + if (again--) + goto read_again; + + return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; + } + + static void + jme_mdio_write(struct net_device *netdev, + int phy, int reg, int val) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i; + + jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | + ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | + smi_phy_addr(phy) | smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) + pr_err("phy(%d) write timeout : %d\n", phy, reg); + } + + static inline void + jme_reset_phy_processor(struct jme_adapter *jme) + { + u32 val; + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + val = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_BMCR, val | BMCR_RESET); + } + + static void + jme_setup_wakeup_frame(struct jme_adapter *jme, + const u32 *mask, u32 crc, int fnr) + { + int i; + + /* + * Setup CRC pattern + */ + jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, crc); + wmb(); + + /* + * Setup Mask + */ + for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { + jwrite32(jme, JME_WFOI, + ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | + (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, mask[i]); + wmb(); + } + } + + static inline void + jme_mac_rxclk_off(struct jme_adapter *jme) + { + jme->reg_gpreg1 |= GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + jme_mac_rxclk_on(struct jme_adapter *jme) + { + jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + 
jme_mac_txclk_off(struct jme_adapter *jme) + { + jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_mac_txclk_on(struct jme_adapter *jme) + { + u32 speed = jme->reg_ghc & GHC_SPEED; + if (speed == GHC_SPEED_1000M) + jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + else + jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_ghc_speed(struct jme_adapter *jme) + { + jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_250A2_workaround(struct jme_adapter *jme) + { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + } + + static inline void + jme_assert_ghc_reset(struct jme_adapter *jme) + { + jme->reg_ghc |= GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_clear_ghc_reset(struct jme_adapter *jme) + { + jme->reg_ghc &= ~GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); + } + + static inline void + jme_reset_mac_processor(struct jme_adapter *jme) + { + static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; + u32 crc = 0xCDCDCDCD; + u32 gpreg0; + int i; + + jme_reset_ghc_speed(jme); + jme_reset_250A2_workaround(jme); + + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_assert_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + udelay(1); + jme_clear_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + + jwrite32(jme, JME_RXDBA_LO, 0x00000000); + jwrite32(jme, JME_RXDBA_HI, 0x00000000); + jwrite32(jme, JME_RXQDC, 0x00000000); + jwrite32(jme, JME_RXNDA, 0x00000000); + jwrite32(jme, JME_TXDBA_LO, 0x00000000); + jwrite32(jme, JME_TXDBA_HI, 0x00000000); + jwrite32(jme, JME_TXQDC, 0x00000000); + jwrite32(jme, JME_TXNDA, 0x00000000); + + jwrite32(jme, JME_RXMCHT_LO, 0x00000000); + jwrite32(jme, JME_RXMCHT_HI, 0x00000000); + for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) + jme_setup_wakeup_frame(jme, mask, crc, i); + if (jme->fpgaver) + gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; + else + gpreg0 = GPREG0_DEFAULT; + jwrite32(jme, JME_GPREG0, gpreg0); + } + + static inline void + jme_clear_pm(struct jme_adapter *jme) + { + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); + } + + static int + jme_reload_eeprom(struct jme_adapter *jme) + { + u32 val; + int i; + + val = jread32(jme, JME_SMBCSR); + + if (val & SMBCSR_EEPROMD) { + val |= SMBCSR_CNACK; + jwrite32(jme, JME_SMBCSR, val); + val |= SMBCSR_RELOAD; + jwrite32(jme, JME_SMBCSR, val); + mdelay(12); + + for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { + mdelay(1); + if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) + break; + } + + if (i == 0) { + pr_err("eeprom reload timeout\n"); + return -EIO; + } + } + + return 0; + } + + static void + jme_load_macaddr(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + unsigned char macaddr[6]; + u32 val; + + spin_lock_bh(&jme->macaddr_lock); + val = jread32(jme, JME_RXUMA_LO); + macaddr[0] = (val >> 0) & 0xFF; + macaddr[1] = (val >> 8) & 0xFF; + macaddr[2] = (val >> 16) & 0xFF; + macaddr[3] = (val >> 24) & 0xFF; + val = jread32(jme, JME_RXUMA_HI); + macaddr[4] = (val >> 0) & 0xFF; + macaddr[5] = (val >> 8) & 0xFF; + memcpy(netdev->dev_addr, macaddr, 6); + spin_unlock_bh(&jme->macaddr_lock); + } + + static 
inline void + jme_set_rx_pcc(struct jme_adapter *jme, int p) + { + switch (p) { + case PCC_OFF: + jwrite32(jme, JME_PCCRX0, + ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P1: + jwrite32(jme, JME_PCCRX0, + ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P2: + jwrite32(jme, JME_PCCRX0, + ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P3: + jwrite32(jme, JME_PCCRX0, + ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + default: + break; + } + wmb(); + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); + } + + static void + jme_start_irq(struct jme_adapter *jme) + { + register struct dynpcc_info *dpi = &(jme->dpi); + + jme_set_rx_pcc(jme, PCC_P1); + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + + jwrite32(jme, JME_PCCTX, + ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | + ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | + PCCTXQ0_EN + ); + + /* + * Enable Interrupts + */ + jwrite32(jme, JME_IENS, INTR_ENABLE); + } + + static inline void + jme_stop_irq(struct jme_adapter *jme) + { + /* + * Disable Interrupts + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + } + + static u32 + jme_linkstat_from_phy(struct jme_adapter *jme) + { + u32 phylink, bmsr; + + phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); + bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); + if (bmsr & BMSR_ANCOMP) + phylink |= PHY_LINK_AUTONEG_COMPLETE; + + return phylink; + } + + static inline void + jme_set_phyfifo_5level(struct jme_adapter *jme) + { + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); + } + + static inline void + jme_set_phyfifo_8level(struct jme_adapter *jme) + { + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); + } + + static int + jme_check_link(struct net_device *netdev, int testonly) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; + char linkmsg[64]; + int rc = 0; + + linkmsg[0] = '\0'; + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + + if (phylink & PHY_LINK_UP) { + if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { + /* + * If we did not enable AN + * Speed/Duplex Info should be obtained from SMI + */ + phylink = PHY_LINK_UP; + + bmcr = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + phylink |= ((bmcr & BMCR_SPEED1000) && + (bmcr & BMCR_SPEED100) == 0) ? + PHY_LINK_SPEED_1000M : + (bmcr & BMCR_SPEED100) ? + PHY_LINK_SPEED_100M : + PHY_LINK_SPEED_10M; + + phylink |= (bmcr & BMCR_FULLDPLX) ? 
+ PHY_LINK_DUPLEX : 0; + + strcat(linkmsg, "Forced: "); + } else { + /* + * Keep polling for speed/duplex resolve complete + */ + while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && + --cnt) { + + udelay(1); + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + } + if (!cnt) + pr_err("Waiting speed resolve timeout\n"); + + strcat(linkmsg, "ANed: "); + } + + if (jme->phylink == phylink) { + rc = 1; + goto out; + } + if (testonly) + goto out; + + jme->phylink = phylink; + + /* + * The speed/duplex setting of jme->reg_ghc already cleared + * by jme_reset_mac_processor() + */ + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme->reg_ghc |= GHC_SPEED_10M; + strcat(linkmsg, "10 Mbps, "); + break; + case PHY_LINK_SPEED_100M: + jme->reg_ghc |= GHC_SPEED_100M; + strcat(linkmsg, "100 Mbps, "); + break; + case PHY_LINK_SPEED_1000M: + jme->reg_ghc |= GHC_SPEED_1000M; + strcat(linkmsg, "1000 Mbps, "); + break; + default: + break; + } + + if (phylink & PHY_LINK_DUPLEX) { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); + jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); + jme->reg_ghc |= GHC_DPX; + } else { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | + TXMCS_BACKOFF | + TXMCS_CARRIERSENSE | + TXMCS_COLLISION); + jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); + } + + jwrite32(jme, JME_GHC, jme->reg_ghc); + + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + if (!(phylink & PHY_LINK_DUPLEX)) + jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme_set_phyfifo_8level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_100M: + jme_set_phyfifo_5level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_1000M: + jme_set_phyfifo_8level(jme); + break; + default: + break; + } + } + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + + strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? + "Full-Duplex, " : + "Half-Duplex, "); + strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 
+ "MDI-X" : + "MDI"); + netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg); + netif_carrier_on(netdev); + } else { + if (testonly) + goto out; + + netif_info(jme, link, jme->dev, "Link is down\n"); + jme->phylink = 0; + netif_carrier_off(netdev); + } + + out: + return rc; + } + + static int + jme_setup_tx_resources(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + + txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + &(txring->dmaalloc), + GFP_ATOMIC); + + if (!txring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), + RING_DESC_ALIGN); + txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, jme->tx_ring_size); + + txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->tx_ring_size, GFP_ATOMIC); + if (unlikely(!(txring->bufinf))) + goto err_free_txring; + + /* + * Initialize Transmit Descriptors + */ + memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); + memset(txring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->tx_ring_size); + + return 0; + + err_free_txring: + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + err_set_null: + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + + return -ENOMEM; + } + + static void + jme_free_tx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi; + + if (txring->alloc) { + if (txring->bufinf) { + for (i = 0 ; i < jme->tx_ring_size ; ++i) { + txbi = txring->bufinf + i; + if (txbi->skb) { + dev_kfree_skb(txbi->skb); + txbi->skb = NULL; + } + txbi->mapping = 0; + txbi->len = 0; + txbi->nr_desc = 0; + txbi->start_xmit = 0; + } + kfree(txring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + txring->alloc = NULL; + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + } + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, 0); + } + + static inline void + jme_enable_tx_engine(struct jme_adapter *jme) + { + /* + * Select Queue 0 + */ + jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); + wmb(); + + /* + * Setup TX Queue 0 DMA Bass Address + */ + jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); + jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + + /* + * Setup TX Descptor Count + */ + jwrite32(jme, JME_TXQDC, jme->tx_ring_size); + + /* + * Enable TX Engine + */ + wmb(); + jwrite32f(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + + /* + * Start clock for TX MAC Processor + */ + jme_mac_txclk_on(jme); + } + + static inline void + jme_restart_tx_engine(struct jme_adapter *jme) + { + /* + * Restart TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + } + + static inline void + jme_disable_tx_engine(struct jme_adapter *jme) + { + int i; + u32 val; + + /* + * Disable TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); + wmb(); + + val = jread32(jme, JME_TXCS); + for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; 
--i) { + mdelay(1); + val = jread32(jme, JME_TXCS); + rmb(); + } + + if (!i) + pr_err("Disable TX engine timeout\n"); + + /* + * Stop clock for TX MAC Processor + */ + jme_mac_txclk_off(jme); + } + + static void + jme_set_clean_rxdesc(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + register struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + rxdesc += i; + rxbi += i; + + rxdesc->dw[0] = 0; + rxdesc->dw[1] = 0; + rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); + rxdesc->desc1.bufaddrl = cpu_to_le32( + (__u64)rxbi->mapping & 0xFFFFFFFFUL); + rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); + if (jme->dev->features & NETIF_F_HIGHDMA) + rxdesc->desc1.flags = RXFLAG_64BIT; + wmb(); + rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; + } + + static int + jme_make_new_rx_buf(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf + i; + struct sk_buff *skb; + dma_addr_t mapping; + + skb = netdev_alloc_skb(jme->dev, + jme->dev->mtu + RX_EXTRA_LEN); + if (unlikely(!skb)) + return -ENOMEM; + + mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + if (likely(rxbi->mapping)) + pci_unmap_page(jme->pdev, rxbi->mapping, + rxbi->len, PCI_DMA_FROMDEVICE); + + rxbi->skb = skb; + rxbi->len = skb_tailroom(skb); + rxbi->mapping = mapping; + return 0; + } + + static void + jme_free_rx_buf(struct jme_adapter *jme, int i) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf; + rxbi += i; + + if (rxbi->skb) { + pci_unmap_page(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxbi->skb); + rxbi->skb = NULL; + rxbi->mapping = 0; + rxbi->len = 0; + } + } + + static void + jme_free_rx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + if (rxring->alloc) { + if (rxring->bufinf) { + for (i = 0 ; i < jme->rx_ring_size ; ++i) + jme_free_rx_buf(jme, i); + kfree(rxring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); + rxring->alloc = NULL; + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + } + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); + } + + static int + jme_setup_rx_resources(struct jme_adapter *jme) + { + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + &(rxring->dmaalloc), + GFP_ATOMIC); + if (!rxring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), + RING_DESC_ALIGN); + rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); + + rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->rx_ring_size, GFP_ATOMIC); + if (unlikely(!(rxring->bufinf))) + goto err_free_rxring; + + /* + * Initiallize Receive Descriptors + */ + memset(rxring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->rx_ring_size); + for (i = 0 ; i < jme->rx_ring_size ; ++i) { + if (unlikely(jme_make_new_rx_buf(jme, i))) { + jme_free_rx_resources(jme); + return -ENOMEM; + } + + 
jme_set_clean_rxdesc(jme, i); + } + + return 0; + + err_free_rxring: + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); + err_set_null: + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + + return -ENOMEM; + } + + static inline void + jme_enable_rx_engine(struct jme_adapter *jme) + { + /* + * Select Queue 0 + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0); + wmb(); + + /* + * Setup RX DMA Bass Address + */ + jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); + jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + + /* + * Setup RX Descriptor Count + */ + jwrite32(jme, JME_RXQDC, jme->rx_ring_size); + + /* + * Setup Unicast Filter + */ + jme_set_unicastaddr(jme->dev); + jme_set_multi(jme->dev); + + /* + * Enable RX Engine + */ + wmb(); + jwrite32f(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); + + /* + * Start clock for RX MAC Processor + */ + jme_mac_rxclk_on(jme); + } + + static inline void + jme_restart_rx_engine(struct jme_adapter *jme) + { + /* + * Start RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); + } + + static inline void + jme_disable_rx_engine(struct jme_adapter *jme) + { + int i; + u32 val; + + /* + * Disable RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs); + wmb(); + + val = jread32(jme, JME_RXCS); + for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_RXCS); + rmb(); + } + + if (!i) + pr_err("Disable RX engine timeout\n"); + + /* + * Stop clock for RX MAC Processor + */ + jme_mac_rxclk_off(jme); + } + + static u16 + jme_udpsum(struct sk_buff *skb) + { + u16 csum = 0xFFFFu; + + if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) + return csum; + if (skb->protocol != htons(ETH_P_IP)) + return csum; + skb_set_network_header(skb, ETH_HLEN); + if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || + (skb->len < (ETH_HLEN + + (ip_hdr(skb)->ihl << 2) + + sizeof(struct udphdr)))) { + skb_reset_network_header(skb); + return csum; + } + skb_set_transport_header(skb, + ETH_HLEN + (ip_hdr(skb)->ihl << 2)); + csum = udp_hdr(skb)->check; + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + return csum; + } + + static int + jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) + { + if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) + return false; + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) + == RXWBFLAG_TCPON)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) + == RXWBFLAG_UDPON) && jme_udpsum(skb)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) + == RXWBFLAG_IPV4)) { + netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n"); + return false; + } + + return true; + } + + static void + jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + struct sk_buff *skb; + int framesize; + + rxdesc += idx; + rxbi += idx; + + skb = rxbi->skb; + 
pci_dma_sync_single_for_cpu(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + if (unlikely(jme_make_new_rx_buf(jme, idx))) { + pci_dma_sync_single_for_device(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + ++(NET_STAT(jme).rx_dropped); + } else { + framesize = le16_to_cpu(rxdesc->descwb.framesize) + - RX_PREPAD_SIZE; + + skb_reserve(skb, RX_PREPAD_SIZE); + skb_put(skb, framesize); + skb->protocol = eth_type_trans(skb, jme->dev); + + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { + u16 vid = le16_to_cpu(rxdesc->descwb.vlan); + + __vlan_hwaccel_put_tag(skb, vid); + NET_STAT(jme).rx_bytes += 4; + } + jme->jme_rx(skb); + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == + cpu_to_le16(RXWBFLAG_DEST_MUL)) + ++(NET_STAT(jme).multicast); + + NET_STAT(jme).rx_bytes += framesize; + ++(NET_STAT(jme).rx_packets); + } + + jme_set_clean_rxdesc(jme, idx); + + } + + static int + jme_process_receive(struct jme_adapter *jme, int limit) + { + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; + + if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) + goto out_inc; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out_inc; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out_inc; + + i = atomic_read(&rxring->next_to_clean); + while (limit > 0) { + rxdesc = rxring->desc; + rxdesc += i; + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || + !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) + goto out; + --limit; + + rmb(); + desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; + + if (unlikely(desccnt > 1 || + rxdesc->descwb.errstat & RXWBERR_ALLERR)) { + + if (rxdesc->descwb.errstat & RXWBERR_CRCERR) + ++(NET_STAT(jme).rx_crc_errors); + else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) + ++(NET_STAT(jme).rx_fifo_errors); + else + ++(NET_STAT(jme).rx_errors); + + if (desccnt > 1) + limit -= desccnt - 1; + + for (j = i, ccnt = desccnt ; ccnt-- ; ) { + jme_set_clean_rxdesc(jme, j); + j = (j + 1) & (mask); + } + + } else { + jme_alloc_and_feed_skb(jme, i); + } + + i = (i + desccnt) & (mask); + } + + out: + atomic_set(&rxring->next_to_clean, i); + + out_inc: + atomic_inc(&jme->rx_cleaning); + + return limit > 0 ? 
limit : 0; + + } + + static void + jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) + { + if (likely(atmp == dpi->cur)) { + dpi->cnt = 0; + return; + } + + if (dpi->attempt == atmp) { + ++(dpi->cnt); + } else { + dpi->attempt = atmp; + dpi->cnt = 0; + } + + } + + static void + jme_dynamic_pcc(struct jme_adapter *jme) + { + register struct dynpcc_info *dpi = &(jme->dpi); + + if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P3); + else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || + dpi->intr_cnt > PCC_INTR_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P2); + else + jme_attempt_pcc(dpi, PCC_P1); + + if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { + if (dpi->attempt < dpi->cur) + tasklet_schedule(&jme->rxclean_task); + jme_set_rx_pcc(jme, dpi->attempt); + dpi->cur = dpi->attempt; + dpi->cnt = 0; + } + } + + static void + jme_start_pcc_timer(struct jme_adapter *jme) + { + struct dynpcc_info *dpi = &(jme->dpi); + dpi->last_bytes = NET_STAT(jme).rx_bytes; + dpi->last_pkts = NET_STAT(jme).rx_packets; + dpi->intr_cnt = 0; + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); + } + + static inline void + jme_stop_pcc_timer(struct jme_adapter *jme) + { + jwrite32(jme, JME_TMCSR, 0); + } + + static void + jme_shutdown_nic(struct jme_adapter *jme) + { + u32 phylink; + + phylink = jme_linkstat_from_phy(jme); + + if (!(phylink & PHY_LINK_UP)) { + /* + * Disable all interrupt before issue timer + */ + jme_stop_irq(jme); + jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); + } + } + + static void + jme_pcc_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + + if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { + jme_shutdown_nic(jme); + return; + } + + if (unlikely(!netif_carrier_ok(netdev) || + (atomic_read(&jme->link_changing) != 1) + )) { + jme_stop_pcc_timer(jme); + return; + } + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + jme_dynamic_pcc(jme); + + jme_start_pcc_timer(jme); + } + + static inline void + jme_polling_mode(struct jme_adapter *jme) + { + jme_set_rx_pcc(jme, PCC_OFF); + } + + static inline void + jme_interrupt_mode(struct jme_adapter *jme) + { + jme_set_rx_pcc(jme, PCC_P1); + } + + static inline int + jme_pseudo_hotplug_enabled(struct jme_adapter *jme) + { + u32 apmc; + apmc = jread32(jme, JME_APMC); + return apmc & JME_APMC_PSEUDO_HP_EN; + } + + static void + jme_start_shutdown_timer(struct jme_adapter *jme) + { + u32 apmc; + + apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; + apmc &= ~JME_APMC_EPIEN_CTRL; + if (!no_extplug) { + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); + wmb(); + } + jwrite32f(jme, JME_APMC, apmc); + + jwrite32f(jme, JME_TIMER2, 0); + set_bit(JME_FLAG_SHUTDOWN, &jme->flags); + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); + } + + static void + jme_stop_shutdown_timer(struct jme_adapter *jme) + { + u32 apmc; + + jwrite32f(jme, JME_TMCSR, 0); + jwrite32f(jme, JME_TIMER2, 0); + clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); + + apmc = jread32(jme, JME_APMC); + apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); + wmb(); + jwrite32f(jme, JME_APMC, apmc); + } + + static void + jme_link_change_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + int rc; + + while 
(!atomic_dec_and_test(&jme->link_changing)) { + atomic_inc(&jme->link_changing); + netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); + while (atomic_read(&jme->link_changing) != 1) + netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); + } + + if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) + goto out; + + jme->old_mtu = netdev->mtu; + netif_stop_queue(netdev); + if (jme_pseudo_hotplug_enabled(jme)) + jme_stop_shutdown_timer(jme); + + jme_stop_pcc_timer(jme); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + netif_carrier_off(netdev); + } + + jme_check_link(netdev, 0); + if (netif_carrier_ok(netdev)) { + rc = jme_setup_rx_resources(jme); + if (rc) { + pr_err("Allocating resources for RX error, Device STOPPED!\n"); + goto out_enable_tasklet; + } + + rc = jme_setup_tx_resources(jme); + if (rc) { + pr_err("Allocating resources for TX error, Device STOPPED!\n"); + goto err_out_free_rx_resources; + } + + jme_enable_rx_engine(jme); + jme_enable_tx_engine(jme); + + netif_start_queue(netdev); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_interrupt_mode(jme); + + jme_start_pcc_timer(jme); + } else if (jme_pseudo_hotplug_enabled(jme)) { + jme_start_shutdown_timer(jme); + } + + goto out_enable_tasklet; + + err_out_free_rx_resources: + jme_free_rx_resources(jme); + out_enable_tasklet: + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + out: + atomic_inc(&jme->link_changing); + } + + static void + jme_rx_clean_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct dynpcc_info *dpi = &(jme->dpi); + + jme_process_receive(jme, jme->rx_ring_size); + ++(dpi->intr_cnt); + + } + + static int + jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) + { + struct jme_adapter *jme = jme_napi_priv(holder); + int rest; + + rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + + if (rest) { + JME_RX_COMPLETE(netdev, holder); + jme_interrupt_mode(jme); + } + + JME_NAPI_WEIGHT_SET(budget, rest); + return JME_NAPI_WEIGHT_VAL(budget) - rest; + } + + static void + jme_rx_empty_tasklet(unsigned long arg) + { + struct jme_adapter *jme = (struct jme_adapter *)arg; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + return; + + if (unlikely(!netif_carrier_ok(jme->dev))) + return; + + netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); + + jme_rx_clean_tasklet(arg); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + } + + static void + jme_wake_queue_if_stopped(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + + smp_wmb(); + if (unlikely(netif_queue_stopped(jme->dev) && + atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { + netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); + netif_wake_queue(jme->dev); + } + + } + + static void + jme_tx_clean_tasklet(unsigned long arg) + { + struct 
jme_adapter *jme = (struct jme_adapter *)arg; + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = txring->desc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; + int i, j, cnt = 0, max, err, mask; + + tx_dbg(jme, "Into txclean\n"); + + if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) + goto out; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out; + + max = jme->tx_ring_size - atomic_read(&txring->nr_free); + mask = jme->tx_ring_mask; + + for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { + + ctxbi = txbi + i; + + if (likely(ctxbi->skb && + !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { + + tx_dbg(jme, "txclean: %d+%d@%lu\n", + i, ctxbi->nr_desc, jiffies); + + err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; + + for (j = 1 ; j < ctxbi->nr_desc ; ++j) { + ttxbi = txbi + ((i + j) & (mask)); + txdesc[(i + j) & (mask)].dw[0] = 0; + + pci_unmap_page(jme->pdev, + ttxbi->mapping, + ttxbi->len, + PCI_DMA_TODEVICE); + + ttxbi->mapping = 0; + ttxbi->len = 0; + } + + dev_kfree_skb(ctxbi->skb); + + cnt += ctxbi->nr_desc; + + if (unlikely(err)) { + ++(NET_STAT(jme).tx_carrier_errors); + } else { + ++(NET_STAT(jme).tx_packets); + NET_STAT(jme).tx_bytes += ctxbi->len; + } + + ctxbi->skb = NULL; + ctxbi->len = 0; + ctxbi->start_xmit = 0; + + } else { + break; + } + + i = (i + ctxbi->nr_desc) & mask; + + ctxbi->nr_desc = 0; + } + + tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); + atomic_set(&txring->next_to_clean, i); + atomic_add(cnt, &txring->nr_free); + + jme_wake_queue_if_stopped(jme); + + out: + atomic_inc(&jme->tx_cleaning); + } + + static void + jme_intr_msi(struct jme_adapter *jme, u32 intrstat) + { + /* + * Disable interrupt + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + + if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { + /* + * Link change event is critical + * all other events are ignored + */ + jwrite32(jme, JME_IEVE, intrstat); + tasklet_schedule(&jme->linkch_task); + goto out_reenable; + } + + if (intrstat & INTR_TMINTR) { + jwrite32(jme, JME_IEVE, INTR_TMINTR); + tasklet_schedule(&jme->pcc_task); + } + + if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { + jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); + tasklet_schedule(&jme->txclean_task); + } + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | + INTR_PCCRX0 | + INTR_RX0EMP)) | + INTR_RX0); + } + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + if (intrstat & INTR_RX0EMP) + atomic_inc(&jme->rx_empty); + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + if (likely(JME_RX_SCHEDULE_PREP(jme))) { + jme_polling_mode(jme); + JME_RX_SCHEDULE(jme); + } + } + } else { + if (intrstat & INTR_RX0EMP) { + atomic_inc(&jme->rx_empty); + tasklet_hi_schedule(&jme->rxempty_task); + } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { + tasklet_hi_schedule(&jme->rxclean_task); + } + } + + out_reenable: + /* + * Re-enable interrupt + */ + jwrite32f(jme, JME_IENS, INTR_ENABLE); + } + + static irqreturn_t + jme_intr(int irq, void *dev_id) + { + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + /* + * Check if it's really an interrupt for us + */ + if (unlikely((intrstat & INTR_ENABLE) == 0)) + return IRQ_NONE; + + /* + * Check if the device still exist + */ + if (unlikely(intrstat == ~((typeof(intrstat))0))) + return IRQ_NONE; + + 
jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; + } + + static irqreturn_t + jme_msi(int irq, void *dev_id) + { + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; + } + + static void + jme_reset_link(struct jme_adapter *jme) + { + jwrite32(jme, JME_TMCSR, TMCSR_SWIT); + } + + static void + jme_restart_an(struct jme_adapter *jme) + { + u32 bmcr; + + spin_lock_bh(&jme->phy_lock); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + spin_unlock_bh(&jme->phy_lock); + } + + static int + jme_request_irq(struct jme_adapter *jme) + { + int rc; + struct net_device *netdev = jme->dev; + irq_handler_t handler = jme_intr; + int irq_flags = IRQF_SHARED; + + if (!pci_enable_msi(jme->pdev)) { + set_bit(JME_FLAG_MSI, &jme->flags); + handler = jme_msi; + irq_flags = 0; + } + + rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (rc) { + netdev_err(netdev, + "Unable to request %s interrupt (return: %d)\n", + test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", + rc); + + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + } + } else { + netdev->irq = jme->pdev->irq; + } + + return rc; + } + + static void + jme_free_irq(struct jme_adapter *jme) + { + free_irq(jme->pdev->irq, jme->dev); + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + jme->dev->irq = jme->pdev->irq; + } + } + + static inline void + jme_new_phy_on(struct jme_adapter *jme) + { + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL); + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_ENBG; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); + } + + static inline void + jme_new_phy_off(struct jme_adapter *jme) + { + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL; + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_PDD3COLD; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); + } + + static inline void + jme_phy_on(struct jme_adapter *jme) + { + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr &= ~BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_on(jme); + } + + static inline void + jme_phy_off(struct jme_adapter *jme) + { + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_off(jme); + } + + static int + jme_open(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + jme_clear_pm(jme); + JME_NAPI_ENABLE(jme); + + tasklet_enable(&jme->linkch_task); + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + rc = jme_request_irq(jme); + if (rc) + goto err_out; + + jme_start_irq(jme); + + jme_phy_on(jme); + if 
(test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_reset_link(jme); + + return 0; + + err_out: + netif_stop_queue(netdev); + netif_carrier_off(netdev); + return rc; + } + + static void + jme_set_100m_half(struct jme_adapter *jme) + { + u32 bmcr, tmp; + + jme_phy_on(jme); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + tmp |= BMCR_SPEED100; + + if (bmcr != tmp) + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); + + if (jme->fpgaver) + jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); + else + jwrite32(jme, JME_GHC, GHC_SPEED_100M); + } + + #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ + static void + jme_wait_link(struct jme_adapter *jme) + { + u32 phylink, to = JME_WAIT_LINK_TIME; + + mdelay(1000); + phylink = jme_linkstat_from_phy(jme); + while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { + mdelay(10); + phylink = jme_linkstat_from_phy(jme); + } + } + + static void + jme_powersave_phy(struct jme_adapter *jme) + { + if (jme->reg_pmcs) { + jme_set_100m_half(jme); + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + jme_wait_link(jme); + jme_clear_pm(jme); + } else { + jme_phy_off(jme); + } + } + + static int + jme_close(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + jme_stop_irq(jme); + jme_free_irq(jme); + + JME_NAPI_DISABLE(jme); + + tasklet_disable(&jme->linkch_task); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + jme->phylink = 0; + jme_phy_off(jme); + + return 0; + } + + static int + jme_alloc_txdesc(struct jme_adapter *jme, + struct sk_buff *skb) + { + struct jme_ring *txring = &(jme->txring[0]); + int idx, nr_alloc, mask = jme->tx_ring_mask; + + idx = txring->next_to_use; + nr_alloc = skb_shinfo(skb)->nr_frags + 2; + + if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) + return -1; + + atomic_sub(nr_alloc, &txring->nr_free); + + txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; + + return idx; + } + + static void + jme_fill_tx_map(struct pci_dev *pdev, + struct txdesc *txdesc, + struct jme_buffer_info *txbi, + struct page *page, + u32 page_offset, + u32 len, + u8 hidma) + { + dma_addr_t dmaaddr; + + dmaaddr = pci_map_page(pdev, + page, + page_offset, + len, + PCI_DMA_TODEVICE); + + pci_dma_sync_single_for_device(pdev, + dmaaddr, + len, + PCI_DMA_TODEVICE); + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->desc2.flags = TXFLAG_OWN; + txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0; + txdesc->desc2.datalen = cpu_to_le16(len); + txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); + txdesc->desc2.bufaddrl = cpu_to_le32( + (__u64)dmaaddr & 0xFFFFFFFFUL); + + txbi->mapping = dmaaddr; + txbi->len = len; + } + + static void + jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) + { + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = txring->desc, *ctxdesc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + int mask = jme->tx_ring_mask; + const struct skb_frag_struct *frag; + u32 len; + + for (i = 0 ; i < nr_frags ; ++i) { + frag = &skb_shinfo(skb)->frags[i]; + ctxdesc = txdesc + ((idx + i + 2) & (mask)); + ctxbi = txbi + ((idx + i + 2) & (mask)); + + jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + skb_frag_page(frag), + frag->page_offset, skb_frag_size(frag), hidma); + } + + len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; + ctxdesc = txdesc + ((idx + 1) & (mask)); + ctxbi = txbi + ((idx + 1) & (mask)); + jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + offset_in_page(skb->data), len, hidma); + + } + + static int + jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) + { + if (unlikely(skb_shinfo(skb)->gso_size && + skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { + dev_kfree_skb(skb); + return -1; + } + + return 0; + } + + static int + jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) + { + *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); + if (*mss) { + *flags |= TXFLAG_LSEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, 0, + IPPROTO_TCP, + 0); + } + + return 0; + } + + return 1; + } + + static void + jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) + { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_proto; + + switch (skb->protocol) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + ip_proto = 0; + break; + } + + switch (ip_proto) { + case IPPROTO_TCP: + *flags |= TXFLAG_TCPCS; + break; + case IPPROTO_UDP: + *flags |= TXFLAG_UDPCS; + break; + default: + netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n"); + break; + } + } + } + + static inline void + jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) + { + if (vlan_tx_tag_present(skb)) { + *flags |= TXFLAG_TAGON; + *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + } + } + + static int + jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) + { + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc; + struct jme_buffer_info *txbi; + u8 flags; + + txdesc = (struct txdesc *)txring->desc + idx; + txbi = txring->bufinf + idx; + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->dw[2] = 0; + txdesc->dw[3] = 0; + txdesc->desc1.pktsize = cpu_to_le16(skb->len); + /* + * Set OWN bit at final. + * When kernel transmit faster than NIC. + * And NIC trying to send this descriptor before we tell + * it to start sending this TX queue. + * Other fields are already filled correctly. 
+ */ + wmb(); + flags = TXFLAG_OWN | TXFLAG_INT; + /* + * Set checksum flags while not tso + */ + if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) + jme_tx_csum(jme, skb, &flags); + jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); + jme_map_tx_skb(jme, skb, idx); + txdesc->desc1.flags = flags; + /* + * Set tx buffer info after telling NIC to send + * For better tx_clean timing + */ + wmb(); + txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; + txbi->skb = skb; + txbi->len = skb->len; + txbi->start_xmit = jiffies; + if (!txbi->start_xmit) + txbi->start_xmit = (0UL-1); + + return 0; + } + + static void + jme_stop_queue_if_full(struct jme_adapter *jme) + { + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf; + int idx = atomic_read(&txring->next_to_clean); + + txbi += idx; + + smp_wmb(); + if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n"); + smp_wmb(); + if (atomic_read(&txring->nr_free) + >= (jme->tx_wake_threshold)) { + netif_wake_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n"); + } + } + + if (unlikely(txbi->start_xmit && + (jiffies - txbi->start_xmit) >= TX_TIMEOUT && + txbi->skb)) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, + "TX Queue Stopped %d@%lu\n", idx, jiffies); + } + } + + /* + * This function is already protected by netif_tx_lock() + */ + + static netdev_tx_t + jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + int idx; + + if (unlikely(jme_expand_header(jme, skb))) { + ++(NET_STAT(jme).tx_dropped); + return NETDEV_TX_OK; + } + + idx = jme_alloc_txdesc(jme, skb); + + if (unlikely(idx < 0)) { + netif_stop_queue(netdev); + netif_err(jme, tx_err, jme->dev, + "BUG! 
Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + + jme_fill_tx_desc(jme, skb, idx); + + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_QUEUE0S | + TXCS_ENABLE); + + tx_dbg(jme, "xmit: %d+%d@%lu\n", + idx, skb_shinfo(skb)->nr_frags + 2, jiffies); + jme_stop_queue_if_full(jme); + + return NETDEV_TX_OK; + } + + static void + jme_set_unicastaddr(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + val = (netdev->dev_addr[3] & 0xff) << 24 | + (netdev->dev_addr[2] & 0xff) << 16 | + (netdev->dev_addr[1] & 0xff) << 8 | + (netdev->dev_addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (netdev->dev_addr[5] & 0xff) << 8 | + (netdev->dev_addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); + } + + static int + jme_set_macaddr(struct net_device *netdev, void *p) + { + struct jme_adapter *jme = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + spin_lock_bh(&jme->macaddr_lock); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + jme_set_unicastaddr(netdev); + spin_unlock_bh(&jme->macaddr_lock); + + return 0; + } + + static void + jme_set_multi(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 mc_hash[2] = {}; + + spin_lock_bh(&jme->rxmcs_lock); + + jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; + + if (netdev->flags & IFF_PROMISC) { + jme->reg_rxmcs |= RXMCS_ALLFRAME; + } else if (netdev->flags & IFF_ALLMULTI) { + jme->reg_rxmcs |= RXMCS_ALLMULFRAME; + } else if (netdev->flags & IFF_MULTICAST) { + struct netdev_hw_addr *ha; + int bit_nr; + + jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; + mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); + } + + jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); + jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); + } + + wmb(); + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + + spin_unlock_bh(&jme->rxmcs_lock); + } + + static int + jme_change_mtu(struct net_device *netdev, int new_mtu) + { + struct jme_adapter *jme = netdev_priv(netdev); + + if (new_mtu == jme->old_mtu) + return 0; + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu) < IPV6_MIN_MTU)) + return -EINVAL; + + if (new_mtu > 4000) { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; + jme_restart_rx_engine(jme); + } else { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; + jme_restart_rx_engine(jme); + } + + netdev->mtu = new_mtu; + netdev_update_features(netdev); + + jme_reset_link(jme); + + return 0; + } + + static void + jme_tx_timeout(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + + jme->phylink = 0; + jme_reset_phy_processor(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + + /* + * Force to Reset the link again + */ + jme_reset_link(jme); + } + + static inline void jme_pause_rx(struct jme_adapter *jme) + { + atomic_dec(&jme->link_changing); + + jme_set_rx_pcc(jme, PCC_OFF); + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_DISABLE(jme); + } else { + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + } + } + + static inline void jme_resume_rx(struct jme_adapter *jme) + { + struct dynpcc_info *dpi = &(jme->dpi); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_ENABLE(jme); + } else { + tasklet_hi_enable(&jme->rxclean_task); + 
tasklet_hi_enable(&jme->rxempty_task); + } + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + + atomic_inc(&jme->link_changing); + } + + static void + jme_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) + { + struct jme_adapter *jme = netdev_priv(netdev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(jme->pdev)); + } + + static int + jme_get_regs_len(struct net_device *netdev) + { + return JME_REG_LEN; + } + + static void + mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) + { + int i; + + for (i = 0 ; i < len ; i += 4) + p[i >> 2] = jread32(jme, reg + i); + } + + static void + mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) + { + int i; + u16 *p16 = (u16 *)p; + + for (i = 0 ; i < reg_nr ; ++i) + p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); + } + + static void + jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 *p32 = (u32 *)p; + + memset(p, 0xFF, JME_REG_LEN); + + regs->version = 1; + mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); + + p32 += 0x100 >> 2; + mdio_memcpy(jme, p32, JME_PHY_REG_NR); + } + + static int + jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = PCC_TX_TO; + ecmd->tx_max_coalesced_frames = PCC_TX_CNT; + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + ecmd->use_adaptive_rx_coalesce = false; + ecmd->rx_coalesce_usecs = 0; + ecmd->rx_max_coalesced_frames = 0; + return 0; + } + + ecmd->use_adaptive_rx_coalesce = true; + + switch (jme->dpi.cur) { + case PCC_P1: + ecmd->rx_coalesce_usecs = PCC_P1_TO; + ecmd->rx_max_coalesced_frames = PCC_P1_CNT; + break; + case PCC_P2: + ecmd->rx_coalesce_usecs = PCC_P2_TO; + ecmd->rx_max_coalesced_frames = PCC_P2_CNT; + break; + case PCC_P3: + ecmd->rx_coalesce_usecs = PCC_P3_TO; + ecmd->rx_max_coalesced_frames = PCC_P3_CNT; + break; + default: + break; + } + + return 0; + } + + static int + jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + struct dynpcc_info *dpi = &(jme->dpi); + + if (netif_running(netdev)) + return -EBUSY; + + if (ecmd->use_adaptive_rx_coalesce && + test_bit(JME_FLAG_POLL, &jme->flags)) { + clear_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_rx; + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + jme_interrupt_mode(jme); + } else if (!(ecmd->use_adaptive_rx_coalesce) && + !(test_bit(JME_FLAG_POLL, &jme->flags))) { + set_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_receive_skb; + jme_interrupt_mode(jme); + } + + return 0; + } + + static void + jme_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; + ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + spin_unlock_bh(&jme->phy_lock); + + ecmd->autoneg = + (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; + } + 
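
Throughout the TX path added above (jme_alloc_txdesc(), jme_map_tx_skb(), the txclean tasklet), the driver keeps the descriptor ring a power of two and wraps every index with jme->tx_ring_mask (tx_ring_size - 1), so advancing a producer or consumer index is a single AND instead of a modulo, while a free-slot counter decides when the queue must be stopped. The stand-alone user-space sketch below shows only that indexing pattern; the ring size, payload type and function names (demo_ring, demo_produce, demo_consume) are invented for illustration and are not taken from the driver.

	#include <stdio.h>

	#define RING_SIZE	8			/* must be a power of two */
	#define RING_MASK	(RING_SIZE - 1)

	struct demo_ring {
		int		slot[RING_SIZE];
		unsigned int	next_to_use;		/* producer index */
		unsigned int	next_to_clean;		/* consumer index */
		int		nr_free;		/* free slots left */
	};

	/* Reserve one slot; mirrors the mask-and-subtract in jme_alloc_txdesc(). */
	static int demo_produce(struct demo_ring *r, int value)
	{
		if (r->nr_free < 1)
			return -1;			/* ring full: caller stops the queue */
		r->slot[r->next_to_use] = value;
		r->next_to_use = (r->next_to_use + 1) & RING_MASK;
		r->nr_free--;
		return 0;
	}

	/* Release one slot; mirrors the index advance in the txclean path. */
	static int demo_consume(struct demo_ring *r, int *value)
	{
		if (r->nr_free == RING_SIZE)
			return -1;			/* ring empty */
		*value = r->slot[r->next_to_clean];
		r->next_to_clean = (r->next_to_clean + 1) & RING_MASK;
		r->nr_free++;
		return 0;
	}

	int main(void)
	{
		struct demo_ring r = { .nr_free = RING_SIZE };
		int i, v;

		for (i = 0; i < 10; i++)
			if (demo_produce(&r, i) < 0)
				printf("ring full at packet %d\n", i);
		while (demo_consume(&r, &v) == 0)
			printf("cleaned %d\n", v);
		return 0;
	}

Only power-of-two sizes make index & (size - 1) equivalent to index % size, which is why jme_init_one() later in this patch sets tx_ring_size to 1 << 10 and derives tx_ring_mask from it.
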
+ static int + jme_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ + (ecmd->tx_pause != 0)) { + + if (ecmd->tx_pause) + jme->reg_txpfc |= TXPFC_PF_EN; + else + jme->reg_txpfc &= ~TXPFC_PF_EN; + + jwrite32(jme, JME_TXPFC, jme->reg_txpfc); + } + + spin_lock_bh(&jme->rxmcs_lock); + if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ + (ecmd->rx_pause != 0)) { + + if (ecmd->rx_pause) + jme->reg_rxmcs |= RXMCS_FLOWCTRL; + else + jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; + + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + } + spin_unlock_bh(&jme->rxmcs_lock); + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ + (ecmd->autoneg != 0)) { + + if (ecmd->autoneg) + val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + else + val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + jme_mdio_write(jme->dev, jme->mii_if.phy_id, + MII_ADVERTISE, val); + } + spin_unlock_bh(&jme->phy_lock); + + return 0; + } + + static void + jme_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + struct jme_adapter *jme = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + + wol->wolopts = 0; + + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + wol->wolopts |= WAKE_PHY; + + if (jme->reg_pmcs & PMCS_MFEN) + wol->wolopts |= WAKE_MAGIC; + + } + + static int + jme_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + struct jme_adapter *jme = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_MAGICSECURE | + WAKE_UCAST | + WAKE_MCAST | + WAKE_BCAST | + WAKE_ARP)) + return -EOPNOTSUPP; + + jme->reg_pmcs = 0; + + if (wol->wolopts & WAKE_PHY) + jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; + + if (wol->wolopts & WAKE_MAGIC) + jme->reg_pmcs |= PMCS_MFEN; + + jwrite32(jme, JME_PMCS, jme->reg_pmcs); + device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); + + return 0; + } + + static int + jme_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + return rc; + } + + static int + jme_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { + struct jme_adapter *jme = netdev_priv(netdev); + int rc, fdc = 0; + + if (ethtool_cmd_speed(ecmd) == SPEED_1000 + && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* + * Check If user changed duplex only while force_media. + * Hardware would not generate link change interrupt. 
+ */ + if (jme->mii_if.force_media && + ecmd->autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != ecmd->duplex)) + fdc = 1; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + + if (!rc) { + if (fdc) + jme_reset_link(jme); + jme->old_ecmd = *ecmd; + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; + } + + static int + jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) + { + int rc; + struct jme_adapter *jme = netdev_priv(netdev); + struct mii_ioctl_data *mii_data = if_mii(rq); + unsigned int duplex_chg; + + if (cmd == SIOCSMIIREG) { + u16 val = mii_data->val_in; + if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && + (val & BMCR_SPEED1000)) + return -EINVAL; + } + + spin_lock_bh(&jme->phy_lock); + rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); + spin_unlock_bh(&jme->phy_lock); + + if (!rc && (cmd == SIOCSMIIREG)) { + if (duplex_chg) + jme_reset_link(jme); + jme_get_settings(netdev, &jme->old_ecmd); + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; + } + + static u32 + jme_get_link(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; + } + + static u32 + jme_get_msglevel(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + return jme->msg_enable; + } + + static void + jme_set_msglevel(struct net_device *netdev, u32 value) + { + struct jme_adapter *jme = netdev_priv(netdev); + jme->msg_enable = value; + } + + static u32 + jme_fix_features(struct net_device *netdev, u32 features) + { + if (netdev->mtu > 1900) + features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + return features; + } + + static int + jme_set_features(struct net_device *netdev, u32 features) + { + struct jme_adapter *jme = netdev_priv(netdev); + + spin_lock_bh(&jme->rxmcs_lock); + if (features & NETIF_F_RXCSUM) + jme->reg_rxmcs |= RXMCS_CHECKSUM; + else + jme->reg_rxmcs &= ~RXMCS_CHECKSUM; + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + spin_unlock_bh(&jme->rxmcs_lock); + + return 0; + } + + static int + jme_nway_reset(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + jme_restart_an(jme); + return 0; + } + + static u8 + jme_smb_read(struct jme_adapter *jme, unsigned int addr) + { + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + jwrite32(jme, JME_SMBINTF, + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_READ | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; + } + + static void + jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) + { + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + jwrite32(jme, JME_SMBINTF, + ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_WRITE | + SMBINTF_HWCMD); + + val = 
jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + mdelay(2); + } + + static int + jme_get_eeprom_len(struct net_device *netdev) + { + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + val = jread32(jme, JME_SMBCSR); + return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; + } + + static int + jme_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + /* + * ethtool will check the boundary for us + */ + eeprom->magic = JME_EEPROM_MAGIC; + for (i = 0 ; i < len ; ++i) + data[i] = jme_smb_read(jme, i + offset); + + return 0; + } + + static int + jme_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) + { + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + if (eeprom->magic != JME_EEPROM_MAGIC) + return -EINVAL; + + /* + * ethtool will check the boundary for us + */ + for (i = 0 ; i < len ; ++i) + jme_smb_write(jme, i + offset, data[i]); + + return 0; + } + + static const struct ethtool_ops jme_ethtool_ops = { + .get_drvinfo = jme_get_drvinfo, + .get_regs_len = jme_get_regs_len, + .get_regs = jme_get_regs, + .get_coalesce = jme_get_coalesce, + .set_coalesce = jme_set_coalesce, + .get_pauseparam = jme_get_pauseparam, + .set_pauseparam = jme_set_pauseparam, + .get_wol = jme_get_wol, + .set_wol = jme_set_wol, + .get_settings = jme_get_settings, + .set_settings = jme_set_settings, + .get_link = jme_get_link, + .get_msglevel = jme_get_msglevel, + .set_msglevel = jme_set_msglevel, + .nway_reset = jme_nway_reset, + .get_eeprom_len = jme_get_eeprom_len, + .get_eeprom = jme_get_eeprom, + .set_eeprom = jme_set_eeprom, + }; + + static int + jme_pci_dma64(struct pci_dev *pdev) + { + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + return 1; + + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) + return 1; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return 0; + + return -1; + } + + static inline void + jme_phy_init(struct jme_adapter *jme) + { + u16 reg26; + + reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); + } + + static inline void + jme_check_hw_ver(struct jme_adapter *jme) + { + u32 chipmode; + + chipmode = jread32(jme, JME_CHIPMODE); + + jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; + jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; + jme->chip_main_rev = jme->chiprev & 0xF; + jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; + } + + static const struct net_device_ops jme_netdev_ops = { + .ndo_open = jme_open, + .ndo_stop = jme_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = jme_ioctl, + .ndo_start_xmit = jme_start_xmit, + .ndo_set_mac_address = jme_set_macaddr, + .ndo_set_rx_mode = jme_set_multi, + .ndo_change_mtu = jme_change_mtu, + .ndo_tx_timeout = jme_tx_timeout, + .ndo_fix_features = jme_fix_features, + .ndo_set_features = jme_set_features, + }; + + static int __devinit + jme_init_one(struct 
pci_dev *pdev, + const struct pci_device_id *ent) + { + int rc = 0, using_dac, i; + struct net_device *netdev; + struct jme_adapter *jme; + u16 bmcr, bmsr; + u32 apmc; + + /* + * set up PCI device basics + */ + rc = pci_enable_device(pdev); + if (rc) { + pr_err("Cannot enable PCI device\n"); + goto err_out; + } + + using_dac = jme_pci_dma64(pdev); + if (using_dac < 0) { + pr_err("Cannot set PCI DMA Mask\n"); + rc = -EIO; + goto err_out_disable_pdev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("No PCI resource region found\n"); + rc = -ENOMEM; + goto err_out_disable_pdev; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) { + pr_err("Cannot obtain PCI resource region\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + /* + * alloc and init net device + */ + netdev = alloc_etherdev(sizeof(*jme)); + if (!netdev) { + pr_err("Cannot allocate netdev structure\n"); + rc = -ENOMEM; + goto err_out_release_regions; + } + netdev->netdev_ops = &jme_netdev_ops; + netdev->ethtool_ops = &jme_ethtool_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + netdev->hw_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + netdev->features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + /* + * init adapter info + */ + jme = netdev_priv(netdev); + jme->pdev = pdev; + jme->dev = netdev; + jme->jme_rx = netif_rx; + jme->old_mtu = netdev->mtu = 1500; + jme->phylink = 0; + jme->tx_ring_size = 1 << 10; + jme->tx_ring_mask = jme->tx_ring_size - 1; + jme->tx_wake_threshold = 1 << 9; + jme->rx_ring_size = 1 << 9; + jme->rx_ring_mask = jme->rx_ring_size - 1; + jme->msg_enable = JME_DEF_MSG_ENABLE; + jme->regs = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!(jme->regs)) { + pr_err("Mapping PCI resource region error\n"); + rc = -ENOMEM; + goto err_out_free_netdev; + } + + if (no_pseudohp) { + apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } else if (force_pseudohp) { + apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } + + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) + + spin_lock_init(&jme->phy_lock); + spin_lock_init(&jme->macaddr_lock); + spin_lock_init(&jme->rxmcs_lock); + + atomic_set(&jme->link_changing, 1); + atomic_set(&jme->rx_cleaning, 1); + atomic_set(&jme->tx_cleaning, 1); + atomic_set(&jme->rx_empty, 1); + + tasklet_init(&jme->pcc_task, + jme_pcc_tasklet, + (unsigned long) jme); + tasklet_init(&jme->linkch_task, + jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, + jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, + jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, + jme_rx_empty_tasklet, + (unsigned long) jme); + tasklet_disable_nosync(&jme->linkch_task); + tasklet_disable_nosync(&jme->txclean_task); + tasklet_disable_nosync(&jme->rxclean_task); + tasklet_disable_nosync(&jme->rxempty_task); + jme->dpi.cur = PCC_P1; + + jme->reg_ghc = 0; + jme->reg_rxcs = RXCS_DEFAULT; + jme->reg_rxmcs = RXMCS_DEFAULT; + jme->reg_txpfc = 0; + jme->reg_pmcs = PMCS_MFEN; + jme->reg_gpreg1 = GPREG1_DEFAULT; + + if (jme->reg_rxmcs & RXMCS_CHECKSUM) + netdev->features |= 
NETIF_F_RXCSUM; + + /* + * Get Max Read Req Size from PCI Config Space + */ + pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); + jme->mrrs &= PCI_DCSR_MRRS_MASK; + switch (jme->mrrs) { + case MRRS_128B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; + break; + case MRRS_256B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; + break; + default: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; + break; + } + + /* + * Must check before reset_mac_processor + */ + jme_check_hw_ver(jme); + jme->mii_if.dev = netdev; + if (jme->fpgaver) { + jme->mii_if.phy_id = 0; + for (i = 1 ; i < 32 ; ++i) { + bmcr = jme_mdio_read(netdev, i, MII_BMCR); + bmsr = jme_mdio_read(netdev, i, MII_BMSR); + if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { + jme->mii_if.phy_id = i; + break; + } + } + + if (!jme->mii_if.phy_id) { + rc = -EIO; + pr_err("Can not find phy_id\n"); + goto err_out_unmap; + } + + jme->reg_ghc |= GHC_LINK_POLL; + } else { + jme->mii_if.phy_id = 1; + } + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme->mii_if.supports_gmii = true; + else + jme->mii_if.supports_gmii = false; + jme->mii_if.phy_id_mask = 0x1F; + jme->mii_if.reg_num_mask = 0x1F; + jme->mii_if.mdio_read = jme_mdio_read; + jme->mii_if.mdio_write = jme_mdio_write; + + jme_clear_pm(jme); + pci_set_power_state(jme->pdev, PCI_D0); + device_set_wakeup_enable(&pdev->dev, true); + + jme_set_phyfifo_5level(jme); + jme->pcirev = pdev->revision; + if (!jme->fpgaver) + jme_phy_init(jme); + jme_phy_off(jme); + + /* + * Reset MAC processor and reload EEPROM for MAC Address + */ + jme_reset_mac_processor(jme); + rc = jme_reload_eeprom(jme); + if (rc) { + pr_err("Reload eeprom for reading MAC Address error\n"); + goto err_out_unmap; + } + jme_load_macaddr(netdev); + + /* + * Tell stack that we are not ready to work until open() + */ + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + pr_err("Cannot register net device\n"); + goto err_out_unmap; + } + + netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? + "JMC250 Gigabit Ethernet" : + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? + "JMC260 Fast Ethernet" : "Unknown", + (jme->fpgaver != 0) ? " (FPGA)" : "", + (jme->fpgaver != 0) ? 
jme->fpgaver : jme->chiprev, + jme->pcirev, netdev->dev_addr); + + return 0; + + err_out_unmap: + iounmap(jme->regs); + err_out_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + err_out_release_regions: + pci_release_regions(pdev); + err_out_disable_pdev: + pci_disable_device(pdev); + err_out: + return rc; + } + + static void __devexit + jme_remove_one(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(jme->regs); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + + } + + static void + jme_shutdown(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + jme_powersave_phy(jme); + pci_pme_active(pdev, true); + } + + #ifdef CONFIG_PM_SLEEP + static int + jme_suspend(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + atomic_dec(&jme->link_changing); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + jme_stop_irq(jme); + + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + jme_stop_pcc_timer(jme); + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + netif_carrier_off(netdev); + jme->phylink = 0; + } + + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + jme_powersave_phy(jme); + + return 0; + } + + static int + jme_resume(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + jme_clear_pm(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_start_irq(jme); + netif_device_attach(netdev); + + atomic_inc(&jme->link_changing); + + jme_reset_link(jme); + + return 0; + } + + static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); + #define JME_PM_OPS (&jme_pm_ops) + + #else + + #define JME_PM_OPS NULL + #endif + + static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, + { } + }; + + static struct pci_driver jme_driver = { + .name = DRV_NAME, + .id_table = jme_pci_tbl, + .probe = jme_init_one, + .remove = __devexit_p(jme_remove_one), + .shutdown = jme_shutdown, + .driver.pm = JME_PM_OPS, + }; + + static int __init + jme_init_module(void) + { + pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION); + return pci_register_driver(&jme_driver); + } + + static void __exit + jme_cleanup_module(void) + { + pci_unregister_driver(&jme_driver); + } + + module_init(jme_init_module); + module_exit(jme_cleanup_module); + + MODULE_AUTHOR("Guo-Fu Tseng cooldavid@cooldavid.org"); + MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + MODULE_DEVICE_TABLE(pci, 
jme_pci_tbl); + diff --combined drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0000000,75338eb..90f2cd2 mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@@ -1,0 -1,816 +1,816 @@@ + /* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + + #include <asm/page.h> + #include <linux/mlx4/cq.h> + #include <linux/slab.h> + #include <linux/mlx4/qp.h> + #include <linux/skbuff.h> + #include <linux/if_vlan.h> + #include <linux/vmalloc.h> + #include <linux/tcp.h> + + #include "mlx4_en.h" + + enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + }; + + static int inline_thold __read_mostly = MAX_INLINE; + + module_param_named(inline_thold, inline_thold, int, 0444); + MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); + + int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int qpn, u32 size, + u16 stride) + { + struct mlx4_en_dev *mdev = priv->mdev; + int tmp; + int err; + + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + + inline_thold = min(inline_thold, MAX_INLINE); + + spin_lock_init(&ring->comp_lock); + + tmp = size * sizeof(struct mlx4_en_tx_info); + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + en_err(priv, "Failed allocating tx_info ring\n"); + return -ENOMEM; + } + en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", + ring->tx_info, tmp); + + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + en_err(priv, "Failed allocating bounce buffer\n"); + err = -ENOMEM; + goto err_tx; + } + ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, + 2 * PAGE_SIZE); + if (err) { + en_err(priv, "Failed allocating hwq resources\n"); + goto err_bounce; + } + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map TX buffer\n"); + goto err_hwq_res; + } + + ring->buf = ring->wqres.buf.direct.buf; + + en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " + "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, + ring->buf_size, (unsigned long 
long) ring->wqres.buf.direct.map); + + ring->qpn = qpn; + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); + if (err) { + en_err(priv, "Failed allocating qp %d\n", ring->qpn); + goto err_map; + } + ring->qp.event = mlx4_en_sqp_event; + + err = mlx4_bf_alloc(mdev->dev, &ring->bf); + if (err) { + en_dbg(DRV, priv, "working without blueflame (%d)", err); + ring->bf.uar = &mdev->priv_uar; + ring->bf.uar->map = mdev->uar_map; + ring->bf_enabled = false; + } else + ring->bf_enabled = true; + + return 0; + + err_map: + mlx4_en_unmap_buffer(&ring->wqres.buf); + err_hwq_res: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + err_bounce: + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + err_tx: + vfree(ring->tx_info); + ring->tx_info = NULL; + return err; + } + + void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_dev *mdev = priv->mdev; + en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); + + if (ring->bf_enabled) + mlx4_bf_free(mdev->dev, &ring->bf); + mlx4_qp_remove(mdev->dev, &ring->qp); + mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + vfree(ring->tx_info); + ring->tx_info = NULL; + } + + int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq) + { + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + ring->cqn = cq; + ring->prod = 0; + ring->cons = 0xffffffff; + ring->last_nr_txbb = 1; + ring->poll_cnt = 0; + ring->blocked = 0; + memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); + memset(ring->buf, 0, ring->buf_size); + + ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = swab32(ring->qp.qpn << 8); ++ ring->doorbell_qpn = ring->qp.qpn << 8; + + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, &ring->context); + if (ring->bf_enabled) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, + &ring->qp, &ring->qp_state); + + return err; + } + + void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_dev *mdev = priv->mdev; + + mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); + } + + + static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner) + { + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; + struct sk_buff *skb = tx_info->skb; + struct skb_frag_struct *frag; + void *end = ring->buf + ring->buf_size; + int frags = skb_shinfo(skb)->nr_frags; + int i; + __be32 *ptr = (__be32 *)tx_desc; + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + if (!tx_info->inl) { + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) 
be64_to_cpu(data[i].addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + + } else { + if (!tx_info->inl) { + if ((void *) data >= end) { + data = ring->buf + ((void *)data - end); + } + + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + /* Check for wraparound before unmapping */ + if ((void *) data >= end) + data = ring->buf; + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + ++data; + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *) ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + + } + dev_kfree_skb_any(skb); + return tx_info->nr_txbb; + } + + + int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + int cnt = 0; + + /* Skip last polled descriptor */ + ring->cons += ring->last_nr_txbb; + en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", + ring->cons, ring->prod); + + if ((u32) (ring->prod - ring->cons) > ring->size) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Tx consumer passed producer!\n"); + return 0; + } + + while (ring->cons != ring->prod) { + ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->cons & ring->size_mask, + !!(ring->cons & ring->size)); + ring->cons += ring->last_nr_txbb; + cnt++; + } + + if (cnt) + en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); + + return cnt; + } + + + static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_cq *mcq = &cq->mcq; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + struct mlx4_cqe *cqe = cq->buf; + u16 index; + u16 new_index; + u32 txbbs_skipped = 0; + u32 cq_last_sav; + + /* index always points to the first TXBB of the last polled descriptor */ + index = ring->cons & ring->size_mask; + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + if (index == new_index) + return; + + if (!priv->port_up) + return; + + /* + * We use a two-stage loop: + * - the first samples the HW-updated CQE + * - the second frees TXBBs until the last sample + * This lets us amortize CQE cache misses, while still polling the CQ + * until is quiescent. + */ + cq_last_sav = mcq->cons_index; + do { + do { + /* Skip over last polled CQE */ + index = (index + ring->last_nr_txbb) & ring->size_mask; + txbbs_skipped += ring->last_nr_txbb; + + /* Poll next CQE */ + ring->last_nr_txbb = mlx4_en_free_tx_desc( + priv, ring, index, + !!((ring->cons + txbbs_skipped) & + ring->size)); + ++mcq->cons_index; + + } while (index != new_index); + + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + } while (index != new_index); + AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, + (u32) (mcq->cons_index - cq_last_sav)); + + /* + * To prevent CQ overflow we first update CQ consumer and only then + * the ring consumer. 
+ */ + mlx4_cq_set_ci(mcq); + wmb(); + ring->cons += txbbs_skipped; + + /* Wakeup Tx queue if this ring stopped it */ + if (unlikely(ring->blocked)) { + if ((u32) (ring->prod - ring->cons) <= + ring->size - HEADROOM - MAX_DESC_TXBBS) { + ring->blocked = 0; + netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); + priv->port_stats.wake_queue++; + } + } + } + + void mlx4_en_tx_irq(struct mlx4_cq *mcq) + { + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + + if (!spin_trylock(&ring->comp_lock)) + return; + mlx4_en_process_tx_cq(cq->dev, cq); + mod_timer(&cq->timer, jiffies + 1); + spin_unlock(&ring->comp_lock); + } + + + void mlx4_en_poll_tx_cq(unsigned long data) + { + struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + u32 inflight; + + INC_PERF_COUNTER(priv->pstats.tx_poll); + + if (!spin_trylock_irq(&ring->comp_lock)) { + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + return; + } + mlx4_en_process_tx_cq(cq->dev, cq); + inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); + + /* If there are still packets in flight and the timer has not already + * been scheduled by the Tx routine then schedule it here to guarantee + * completion processing of these packets */ + if (inflight && priv->port_up) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + spin_unlock_irq(&ring->comp_lock); + } + + static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + u32 index, + unsigned int desc_size) + { + u32 copy = (ring->size - index) * TXBB_SIZE; + int i; + + for (i = desc_size - copy - 4; i >= 0; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + i)) = + *((u32 *) (ring->bounce_buf + copy + i)); + } + + for (i = copy - 4; i >= 4 ; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = + *((u32 *) (ring->bounce_buf + i)); + } + + /* Return real descriptor location */ + return ring->buf + index * TXBB_SIZE; + } + + static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) + { + struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; + unsigned long flags; + + /* If we don't have a pending timer, set one up to catch our recent + post in case the interface becomes idle */ + if (!timer_pending(&cq->timer)) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ + if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) + if (spin_trylock_irqsave(&ring->comp_lock, flags)) { + mlx4_en_process_tx_cq(priv->dev, cq); + spin_unlock_irqrestore(&ring->comp_lock, flags); + } + } + + static int is_inline(struct sk_buff *skb, void **pfrag) + { + void *ptr; + + if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { + if (skb_shinfo(skb)->nr_frags == 1) { + ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]); + if (unlikely(!ptr)) + return 0; + + if (pfrag) + *pfrag = ptr; + + return 1; + } else if (unlikely(skb_shinfo(skb)->nr_frags)) + return 0; + else + return 1; + } + + return 0; + } + + static int inline_size(struct sk_buff *skb) + { + if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) + <= MLX4_INLINE_ALIGN) + return ALIGN(skb->len + CTRL_SIZE + + 
sizeof(struct mlx4_wqe_inline_seg), 16); + else + return ALIGN(skb->len + CTRL_SIZE + 2 * + sizeof(struct mlx4_wqe_inline_seg), 16); + } + + static int get_real_size(struct sk_buff *skb, struct net_device *dev, + int *lso_header_size) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + int real_size; + + if (skb_is_gso(skb)) { + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + + ALIGN(*lso_header_size + 4, DS_SIZE); + if (unlikely(*lso_header_size != skb_headlen(skb))) { + /* We add a segment for the skb linear buffer only if + * it contains data */ + if (*lso_header_size < skb_headlen(skb)) + real_size += DS_SIZE; + else { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Non-linear headers\n"); + return 0; + } + } + } else { + *lso_header_size = 0; + if (!is_inline(skb, NULL)) + real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; + else + real_size = inline_size(skb); + } + + return real_size; + } + + static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, + int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) + { + struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + + if (skb->len <= spc) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, + skb_frag_size(&skb_shinfo(skb)->frags[0])); + + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + if (skb_headlen(skb) <= spc) { + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_headlen(skb) < spc) { + memcpy(((void *)(inl + 1)) + skb_headlen(skb), + fragptr, spc - skb_headlen(skb)); + fragptr += spc - skb_headlen(skb); + } + inl = (void *) (inl + 1) + spc; + memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); + } else { + skb_copy_from_linear_data(skb, inl + 1, spc); + inl = (void *) (inl + 1) + spc; + skb_copy_from_linear_data_offset(skb, spc, inl + 1, + skb_headlen(skb) - spc); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, + fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); + } + + wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); + } + tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; + } + + u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 vlan_tag = 0; + + /* If we support per priority flow control and the packet contains + * a vlan tag, send the packet to the TX ring assigned to that priority + */ + if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); + } + + return skb_tx_hash(dev, skb); + } + + static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) + { + __iowrite64_copy(dst, src, bytecnt / 8); + } + + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_cq *cq; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct skb_frag_struct *frag; + struct mlx4_en_tx_info *tx_info; + struct ethhdr *ethh; + u64 mac; + u32 mac_l, mac_h; + int 
tx_ind = 0; + int nr_txbb; + int desc_size; + int real_size; + dma_addr_t dma; + u32 index, bf_index; + __be32 op_own; + u16 vlan_tag = 0; + int i; + int lso_header_size; + void *fragptr; + bool bounce = false; + + if (!priv->port_up) + goto tx_drop; + + real_size = get_real_size(skb, dev, &lso_header_size); + if (unlikely(!real_size)) + goto tx_drop; + + /* Align descriptor to TXBB size */ + desc_size = ALIGN(real_size, TXBB_SIZE); + nr_txbb = desc_size / TXBB_SIZE; + if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Oversized header or SG list\n"); + goto tx_drop; + } + + tx_ind = skb->queue_mapping; + ring = &priv->tx_ring[tx_ind]; + if (vlan_tx_tag_present(skb)) + vlan_tag = vlan_tx_tag_get(skb); + + /* Check available TXBBs And 2K spare for prefetch */ + if (unlikely(((int)(ring->prod - ring->cons)) > + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + /* every full Tx ring stops queue */ + netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); + ring->blocked = 1; + priv->port_stats.queue_stopped++; + + /* Use interrupts to find out when queue opened */ + cq = &priv->tx_cq[tx_ind]; + mlx4_en_arm_cq(priv, cq); + return NETDEV_TX_BUSY; + } + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32) (ring->prod - ring->cons - 1)); + + /* Packet is good - grab an index and transmit it */ + index = ring->prod & ring->size_mask; + bf_index = ring->prod; + + /* See if we have enough space for whole descriptor TXBB for setting + * SW ownership on next descriptor; if not, use a bounce buffer. */ + if (likely(index + nr_txbb <= ring->size)) + tx_desc = ring->buf + index * TXBB_SIZE; + else { + tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; + bounce = true; + } + + /* Save skb in tx_info ring */ + tx_info = &ring->tx_info[index]; + tx_info->skb = skb; + tx_info->nr_txbb = nr_txbb; + + /* Prepare ctrl segement apart opcode+ownership, which depends on + * whether LSO is used */ + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; + tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + ring->tx_csum++; + } + + if (unlikely(priv->validate_loopback)) { + /* Copy dst mac address to wqe */ + skb_reset_mac_header(skb); + ethh = eth_hdr(skb); + if (ethh && ethh->h_dest) { + mac = mlx4_en_mac_to_u64(ethh->h_dest); + mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); + mac_l = (u32) (mac & 0xffffffff); + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); + tx_desc->ctrl.imm = cpu_to_be32(mac_l); + } + } + + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + /* Mark opcode as LSO */ + op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | + ((ring->prod & ring->size) ? 
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + /* Fill in the LSO prefix */ + tx_desc->lso.mss_hdr_size = cpu_to_be32( + skb_shinfo(skb)->gso_size << 16 | lso_header_size); + + /* Copy headers; + * note that we already verified that it is linear */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + data = ((void *) &tx_desc->lso + + ALIGN(lso_header_size + 4, DS_SIZE)); + + priv->port_stats.tso_packets++; + i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + + !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); + ring->bytes += skb->len + (i - 1) * lso_header_size; + ring->packets += i; + } else { + /* Normal (Non LSO) packet */ + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + data = &tx_desc->data; + ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); + ring->packets++; + + } + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); + + + /* valid only for none inline segments */ + tx_info->data_offset = (void *) data - (void *) tx_desc; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; + data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + + if (!is_inline(skb, &fragptr)) { + /* Map fragments */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + frag = &skb_shinfo(skb)->frags[i]; + dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_frag_size(frag)); + --data; + } + + /* Map linear part */ + if (tx_info->linear) { + dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, + skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); + } + tx_info->inl = 0; + } else { + build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); + tx_info->inl = 1; + } + + ring->prod += nr_txbb; + + /* If we used a bounce buffer then copy descriptor back into place */ + if (bounce) + tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); + + /* Run destructor before passing skb to HW */ + if (likely(!skb_shared(skb))) + skb_orphan(skb); + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { - *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; ++ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descirptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + /* Ensure new descirptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + wmb(); - writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); ++ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + } + + /* Poll CQ here */ + mlx4_en_xmit_poll(priv, tx_ind); + + return NETDEV_TX_OK; + + tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + diff --combined drivers/net/ethernet/mellanox/mlx4/eq.c index 869a2c2,1ad1f60..869a2c2 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/eq.c @@@ -484,7 -484,7 +484,7 @@@ static void mlx4_free_eq(struct mlx4_de
mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) - pci_free_consistent(dev->pdev, PAGE_SIZE, + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, eq->page_list[i].map);
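The eq.c hunk just above swaps the legacy pci_free_consistent() wrapper for the generic DMA API's dma_free_coherent(), which takes a struct device * rather than a struct pci_dev *. A rough sketch of the alloc/free pairing this API expects (the helper names below are illustrative, not taken from the driver):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Minimal sketch: one page of coherent DMA memory per EQ page. */
	static void *alloc_eq_page(struct pci_dev *pdev, dma_addr_t *dma)
	{
		return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
	}

	static void free_eq_page(struct pci_dev *pdev, void *buf, dma_addr_t dma)
	{
		/* Free with the same device, size and DMA handle the allocation returned. */
		dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma);
	}
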
diff --combined drivers/net/ethernet/mellanox/mlx4/fw.c index 0000000,ed452dd..abdfbac mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@@ -1,0 -1,945 +1,951 @@@ + /* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + #include <linux/mlx4/cmd.h> + #include <linux/cache.h> + + #include "fw.h" + #include "icm.h" + + enum { + MLX4_COMMAND_INTERFACE_MIN_REV = 2, + MLX4_COMMAND_INTERFACE_MAX_REV = 3, + MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, + }; + + extern void __buggy_use_of_MLX4_GET(void); + extern void __buggy_use_of_MLX4_PUT(void); + + static int enable_qos; + module_param(enable_qos, bool, 0444); + MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); + + #define MLX4_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + switch (sizeof (dest)) { \ + case 1: (dest) = *(u8 *) __p; break; \ + case 2: (dest) = be16_to_cpup(__p); break; \ + case 4: (dest) = be32_to_cpup(__p); break; \ + case 8: (dest) = be64_to_cpup(__p); break; \ + default: __buggy_use_of_MLX4_GET(); \ + } \ + } while (0) + + #define MLX4_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MLX4_PUT(); \ + } \ + } while (0) + + static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) + { + static const char *fname[] = { + [ 0] = "RC transport", + [ 1] = "UC transport", + [ 2] = "UD transport", + [ 3] = "XRC transport", + [ 4] = "reliable multicast", + [ 5] = "FCoIB support", + [ 6] = "SRQ support", + [ 7] = "IPoIB checksum offload", + [ 8] = "P_Key violation counter", + [ 9] = "Q_Key violation counter", + [10] = "VMM", + [12] = "DPDP", + [15] = "Big LSO headers", + [16] = "MW support", + [17] = "APM support", + [18] = "Atomic ops support", + [19] = "Raw multicast support", + [20] = "Address vector port checking support", + [21] = "UD multicast support", + [24] = "Demand paging support", + [25] = "Router support", + [30] = "IBoE support", + [32] = "Unicast loopback support", + [34] = "FCS header control", + [38] = "Wake On LAN support", + [40] = "UDP RSS support", + [41] = "Unicast VEP steering support", + [42] = "Multicast VEP steering support", + [48] = "Counters support", + }; + int i; + + mlx4_dbg(dev, "DEV_CAP flags:\n"); + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); + } + + int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err = 0; + + #define MOD_STAT_CFG_IN_SIZE 0x100 + + #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 + #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); + + MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); + MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32, flags, ext_flags; + u16 size; + u16 stat_rate; + int err; + int i; + + #define QUERY_DEV_CAP_OUT_SIZE 0x100 + #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 + #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 + #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 + 
#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 + #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 + #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 + #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 + #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 + #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 + #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a + #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b + #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d + #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e + #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f + #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 + #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 + #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 + #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 + #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 + #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 + #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b + #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d + #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f + #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 + #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 + #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 + #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 + #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 + #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b + #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c + #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f + #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 + #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 + #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 + #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 + #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b + #define QUERY_DEV_CAP_BF_OFFSET 0x4c + #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d + #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e + #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f + #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 + #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 + #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 + #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 + #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 + #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 + #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 + #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 + #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 ++#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 ++#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 + #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 + #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 + #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 + #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 + #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 + #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 + #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a + #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c + #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e + #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 + #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 + #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 + #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 + #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); + dev_cap->reserved_qps = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); + dev_cap->max_qps = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); + dev_cap->reserved_srqs = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); + dev_cap->max_srqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); + dev_cap->max_cq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); + dev_cap->reserved_cqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); + dev_cap->max_cqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); + dev_cap->max_mpts = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); + dev_cap->reserved_eqs = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); + dev_cap->max_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); + dev_cap->reserved_mtts = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); + dev_cap->max_mrw_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); + dev_cap->reserved_mrws = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); + dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); + dev_cap->max_requester_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); + dev_cap->max_responder_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); + field &= 0x1f; + if (!field) + dev_cap->max_gso_sz = 0; + else + dev_cap->max_gso_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); + dev_cap->max_rdma_global = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); + dev_cap->local_ca_ack_delay = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); + dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); + dev_cap->stat_rate_support = stat_rate; + MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); + dev_cap->flags = flags | (u64)ext_flags << 32; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); + dev_cap->reserved_uars = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); + dev_cap->uar_size = 1 << ((field & 0x3f) + 20); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); + dev_cap->min_page_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); + if (field & 0x80) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); + dev_cap->bf_reg_size = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) + field = 3; + dev_cap->bf_regs_per_page = 1 << (field & 0x3f); + mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", + dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); + } else { + dev_cap->bf_reg_size = 0; + mlx4_dbg(dev, "BlueFlame not available\n"); + } + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); + dev_cap->max_sq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); + dev_cap->max_sq_desc_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); + dev_cap->max_qp_per_mcg = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); + dev_cap->reserved_mgms = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); + dev_cap->max_mcgs = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); + dev_cap->reserved_pds = field >> 4; + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_MAX_PD_OFFSET); + dev_cap->max_pds = 1 << (field & 0x3f); ++ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); ++ dev_cap->reserved_xrcds = field >> 4; ++ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); ++ dev_cap->max_xrcds = 1 << (field & 0x1f); + + MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); + dev_cap->rdmarc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); + dev_cap->qpc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); + dev_cap->aux_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); + dev_cap->altc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); + dev_cap->eqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); + dev_cap->cqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); + dev_cap->srq_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); + dev_cap->cmpt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); + dev_cap->mtt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); + dev_cap->dmpt_entry_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); + dev_cap->max_srq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); + dev_cap->max_qp_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); + dev_cap->resize_srq = field & 1; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); + dev_cap->max_rq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); + dev_cap->max_rq_desc_sz = size; + + MLX4_GET(dev_cap->bmme_flags, outbox, + QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(dev_cap->reserved_lkey, outbox, + QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(dev_cap->max_icm_sz, outbox, + QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) + MLX4_GET(dev_cap->max_counters, outbox, + QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + for (i = 1; i <= dev_cap->num_ports; ++i) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->max_vl[i] = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + dev_cap->ib_mtu[i] = field >> 4; + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + dev_cap->max_gids[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + } + } else { + #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 + #define QUERY_PORT_MTU_OFFSET 0x01 + #define QUERY_PORT_ETH_MTU_OFFSET 0x02 + #define QUERY_PORT_WIDTH_OFFSET 0x06 + #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 + #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a + #define QUERY_PORT_MAX_VL_OFFSET 0x0b + #define QUERY_PORT_MAC_OFFSET 0x10 + #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 + #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c + #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 + + for (i = 1; i <= dev_cap->num_ports; ++i) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + dev_cap->supported_port_types[i] = field & 3; + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + dev_cap->ib_mtu[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, 
QUERY_PORT_MAX_GID_PKEY_OFFSET); + dev_cap->max_gids[i] = 1 << (field >> 4); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + dev_cap->max_vl[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); + dev_cap->log_max_macs[i] = field & 0xf; + dev_cap->log_max_vlans[i] = field >> 4; + MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); + MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); + MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); + dev_cap->trans_type[i] = field32 >> 24; + dev_cap->vendor_oui[i] = field32 & 0xffffff; + MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); + MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); + } + } + + mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", + dev_cap->bmme_flags, dev_cap->reserved_lkey); + + /* + * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then + * we can't use any EQs whose doorbell falls on that page, + * even if the EQ itself isn't reserved. + */ + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + + mlx4_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_cap->max_icm_sz >> 20); + mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); + mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); + mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); + mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_cap->reserved_mrws, dev_cap->reserved_mtts); + mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); + mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_cap->max_pds, dev_cap->reserved_mgms); + mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); + mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", + dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], + dev_cap->max_port_width[1]); + mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", + dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); + mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", + dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); + mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); + mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); + + dump_dev_cap_flags(dev, dev_cap->flags); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) + { + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + int i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + pages = mailbox->buf; + + for (mlx4_icm_first(icm, &iter); + !mlx4_icm_last(&iter); + mlx4_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. 
+ */ + lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; + if (lg < MLX4_ICM_PAGE_SHIFT) { + mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", + MLX4_ICM_PAGE_SIZE, + (unsigned long long) mlx4_icm_addr(&iter), + mlx4_icm_size(&iter)); + err = -EINVAL; + goto out; + } + + for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cpu_to_be64(virt); + virt += 1 << lg; + } + + pages[nent * 2 + 1] = + cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | + (lg - MLX4_ICM_PAGE_SHIFT)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MLX4_MAILBOX_SIZE / 16) { + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + switch (op) { + case MLX4_CMD_MAP_FA: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM_AUX: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM: + mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", + tc, ts, (unsigned long long) virt - (ts << 10)); + break; + } + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) + { + return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); + } + + int mlx4_UNMAP_FA(struct mlx4_dev *dev) + { + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); + } + + + int mlx4_RUN_FW(struct mlx4_dev *dev) + { + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); + } + + int mlx4_QUERY_FW(struct mlx4_dev *dev) + { + struct mlx4_fw *fw = &mlx4_priv(dev)->fw; + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err = 0; + u64 fw_ver; + u16 cmd_if_rev; + u8 lg; + + #define QUERY_FW_OUT_SIZE 0x100 + #define QUERY_FW_VER_OFFSET 0x00 + #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a + #define QUERY_FW_MAX_CMD_OFFSET 0x0f + #define QUERY_FW_ERR_START_OFFSET 0x30 + #define QUERY_FW_ERR_SIZE_OFFSET 0x38 + #define QUERY_FW_ERR_BAR_OFFSET 0x3c + + #define QUERY_FW_SIZE_OFFSET 0x00 + #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 + #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subminor version is at more significant bits than minor + * version, so swap here. 
+ */ + dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | + ((fw_ver & 0xffff0000ull) >> 16) | + ((fw_ver & 0x0000ffffull) << 16); + + MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || + cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { + mlx4_err(dev, "Installed FW has unsupported " + "command interface revision %d.\n", + cmd_if_rev); + mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff); + mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", + MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); + err = -ENODEV; + goto out; + } + + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) + dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; + + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + cmd->max_cmds = 1 << lg; + + mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff, + cmd_if_rev, cmd->max_cmds); + + MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); + MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); + fw->catas_bar = (fw->catas_bar >> 6) * 2; + + mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", + (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); + + MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); + fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + + mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + fw->fw_pages = + ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", + (unsigned long long) fw->clr_int_base, fw->clr_int_bar); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + static void get_board_id(void *vsd, char *board_id) + { + int i; + + #define VSD_OFFSET_SIG1 0x00 + #define VSD_OFFSET_SIG2 0xde + #define VSD_OFFSET_MLX_BOARD_ID 0xd0 + #define VSD_OFFSET_TS_BOARD_ID 0x20 + + #define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MLX4_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } + } + + int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err; + + #define QUERY_ADAPTER_OUT_SIZE 0x100 + #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 + #define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, + MLX4_CMD_TIME_CLASS_A); + if (err) + goto out; + + MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + + out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) + { + struct mlx4_cmd_mailbox *mailbox; + __be32 *inbox; + int err; + + #define INIT_HCA_IN_SIZE 0x200 + #define INIT_HCA_VERSION_OFFSET 0x000 + #define INIT_HCA_VERSION 2 + #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e + #define INIT_HCA_FLAGS_OFFSET 0x014 + #define INIT_HCA_QPC_OFFSET 0x020 + #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) + #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) + #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) + #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) + #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) + #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) + #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) + #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) + #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) + #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) + #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) + #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) + #define INIT_HCA_MCAST_OFFSET 0x0c0 + #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) + #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) + #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) + #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) + #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) + #define INIT_HCA_TPT_OFFSET 0x0f0 + #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) + #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) + #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) + #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) + #define INIT_HCA_UAR_OFFSET 0x120 + #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) + #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_HCA_IN_SIZE); + + *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; + + *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = + (ilog2(cache_line_size()) - 4) << 5; + + #if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); + #elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); + #else + #error Host endianness not defined + #endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* Enable IPoIB checksumming if we 
can: */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); + + /* Enable QoS support if module parameter set */ + if (enable_qos) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); + + /* enable counters */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); + + /* multicast attributes */ + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000); + + if (err) + mlx4_err(dev, "INIT_HCA returns %d\n", err); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) + { + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + u16 field; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + #define INIT_PORT_IN_SIZE 256 + #define INIT_PORT_FLAGS_OFFSET 0x00 + #define INIT_PORT_FLAG_SIG (1 << 18) + #define INIT_PORT_FLAG_NG (1 << 17) + #define INIT_PORT_FLAG_G0 (1 << 16) + #define INIT_PORT_VL_SHIFT 4 + #define INIT_PORT_PORT_WIDTH_SHIFT 8 + #define INIT_PORT_MTU_OFFSET 0x04 + #define INIT_PORT_MAX_GID_OFFSET 0x06 + #define INIT_PORT_MAX_PKEY_OFFSET 0x0a + #define INIT_PORT_GUID0_OFFSET 0x10 + #define INIT_PORT_NODE_GUID_OFFSET 0x18 + #define INIT_PORT_SI_GUID_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + memset(inbox, 0, INIT_PORT_IN_SIZE); + + flags = 0; + flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + + field = 128 << dev->caps.ib_mtu_cap[port]; + MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); + field = dev->caps.gid_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); + field = 
dev->caps.pkey_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); + + mlx4_free_cmd_mailbox(dev, mailbox); + } else + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); + + return err; + } + EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); + + int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) + { + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); + } + EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); + + int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) + { + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); + } + + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) + { + int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, + MLX4_CMD_SET_ICM_SIZE, + MLX4_CMD_TIME_CLASS_A); + if (ret) + return ret; + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + return 0; + } + + int mlx4_NOP(struct mlx4_dev *dev) + { + /* Input modifier of 0x1f means "finish as soon as possible." */ + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); + } + + #define MLX4_WOL_SETUP_MODE (5 << 28) + int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) + { + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); + } + EXPORT_SYMBOL_GPL(mlx4_wol_read); + + int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) + { + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A); + } + EXPORT_SYMBOL_GPL(mlx4_wol_write); diff --combined drivers/net/ethernet/mellanox/mlx4/fw.h index bf5ec22,1e8ecc3..bf5ec22 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@@ -93,8 -93,6 +93,8 @@@ struct mlx4_dev_cap int max_mcgs; int reserved_pds; int max_pds; + int reserved_xrcds; + int max_xrcds; int qpc_entry_sz; int rdmarc_entry_sz; int altc_entry_sz; diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index 660b691,f0ee35d..660b691 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -220,10 -220,6 +220,10 @@@ static int mlx4_dev_cap(struct mlx4_de dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; + dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->reserved_xrcds : 0; + dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->max_xrcds : 0; dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); @@@ -916,18 -912,11 +916,18 @@@ static int mlx4_setup_hca(struct mlx4_d goto err_kar_unmap; }
+ err = mlx4_init_xrcd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize " + "reliable connection domain table, aborting.\n"); + goto err_pd_table_free; + } + err = mlx4_init_mr_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "memory region table, aborting.\n"); - goto err_pd_table_free; + goto err_xrcd_table_free; }
err = mlx4_init_eq_table(dev); @@@ -1044,9 -1033,6 +1044,9 @@@ err_eq_table_free err_mr_table_free: mlx4_cleanup_mr_table(dev);
+err_xrcd_table_free: + mlx4_cleanup_xrcd_table(dev); + err_pd_table_free: mlx4_cleanup_pd_table(dev);
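The mlx4_setup_hca() hunks above slot the new XRCD table between the PD and MR tables, and the unwind labels release resources in reverse order of initialization. A stripped-down sketch of that goto-unwind pattern, using only the init/cleanup helpers visible in this diff (the wrapper function itself is hypothetical):

	static int setup_tables_sketch(struct mlx4_dev *dev)
	{
		int err;

		err = mlx4_init_pd_table(dev);
		if (err)
			return err;

		err = mlx4_init_xrcd_table(dev);
		if (err)
			goto err_pd;

		err = mlx4_init_mr_table(dev);
		if (err)
			goto err_xrcd;

		return 0;

	err_xrcd:
		mlx4_cleanup_xrcd_table(dev);	/* undo in reverse order */
	err_pd:
		mlx4_cleanup_pd_table(dev);
		return err;
	}
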
@@@ -1369,7 -1355,6 +1369,7 @@@ err_port mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev);
@@@ -1431,7 -1416,6 +1431,7 @@@ static void mlx4_remove_one(struct pci_ mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev);
iounmap(priv->kar); diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index fee2d05,a2fcd84..fee2d05 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -335,7 -335,6 +335,7 @@@ struct mlx4_priv struct mlx4_cmd cmd;
struct mlx4_bitmap pd_bitmap; + struct mlx4_bitmap xrcd_bitmap; struct mlx4_uar_table uar_table; struct mlx4_mr_table mr_table; struct mlx4_cq_table cq_table; @@@ -385,7 -384,6 +385,7 @@@ int mlx4_alloc_eq_table(struct mlx4_de void mlx4_free_eq_table(struct mlx4_dev *dev);
int mlx4_init_pd_table(struct mlx4_dev *dev); +int mlx4_init_xrcd_table(struct mlx4_dev *dev); int mlx4_init_uar_table(struct mlx4_dev *dev); int mlx4_init_mr_table(struct mlx4_dev *dev); int mlx4_init_eq_table(struct mlx4_dev *dev); @@@ -395,7 -393,6 +395,7 @@@ int mlx4_init_srq_table(struct mlx4_de int mlx4_init_mcg_table(struct mlx4_dev *dev);
void mlx4_cleanup_pd_table(struct mlx4_dev *dev); +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); void mlx4_cleanup_uar_table(struct mlx4_dev *dev); void mlx4_cleanup_mr_table(struct mlx4_dev *dev); void mlx4_cleanup_eq_table(struct mlx4_dev *dev); diff --combined drivers/net/ethernet/mellanox/mlx4/mr.c index ab639cf,9c188bd..ab639cf --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@@ -139,7 -139,7 +139,7 @@@ static int mlx4_buddy_init(struct mlx4_
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), GFP_KERNEL); - buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), + buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; diff --combined drivers/net/ethernet/mellanox/mlx4/pd.c index 3736163,1286b88..3736163 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@@ -61,24 -61,6 +61,24 @@@ void mlx4_pd_free(struct mlx4_dev *dev } EXPORT_SYMBOL_GPL(mlx4_pd_free);
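The mr.c hunk above replaces an open-coded kzalloc((n + 1) * sizeof (int *), ...) with kcalloc(): the array intent becomes explicit, the multiplication is overflow-checked, and sizeof *buddy->num_free sizes each element by the pointed-to type instead of a pointer. A generic sketch of the idiom, with placeholder names:

	int *num_free;

	/* kcalloc(n, size, flags): zeroed, overflow-checked array allocation. */
	num_free = kcalloc(max_order + 1, sizeof(*num_free), GFP_KERNEL);
	if (!num_free)
		return -ENOMEM;
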
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); + if (*xrcdn == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); + +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_free); + int mlx4_init_pd_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@@ -92,18 -74,6 +92,18 @@@ void mlx4_cleanup_pd_table(struct mlx4_ mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); }
+int mlx4_init_xrcd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), + (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); +} + +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); +}
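mlx4_xrcd_alloc() above hands out XRC domain numbers from the new xrcd_bitmap and reports exhaustion as -ENOMEM, while mlx4_xrcd_free() returns the number to the bitmap. A hedged usage sketch from a hypothetical caller:

	u32 xrcdn;
	int err;

	err = mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;	/* -ENOMEM once the bitmap is exhausted */

	/* ... pass xrcdn to whatever hardware object needs an XRC domain ... */

	mlx4_xrcd_free(dev, xrcdn);
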
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) { diff --combined drivers/net/ethernet/mellanox/mlx4/port.c index 0000000,163a314..836338a mode 000000,100644..100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@@ -1,0 -1,488 +1,492 @@@ + /* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + #include <linux/errno.h> + #include <linux/if_ether.h> + + #include <linux/mlx4/cmd.h> + + #include "mlx4.h" + + #define MLX4_MAC_VALID (1ull << 63) + #define MLX4_MAC_MASK 0xffffffffffffULL + + #define MLX4_VLAN_VALID (1u << 31) + #define MLX4_VLAN_MASK 0xfff + + void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) + { + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = 1 << dev->caps.log_num_macs; + table->total = 0; + } + + void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) + { + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; + table->total = 0; + } + + static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) + { + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, + u64 mac, int *qpn, u8 reserve) + { + struct mlx4_qp qp; + u8 gid[16] = {0}; + int err; + + if (reserve) { + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + if (err) { + mlx4_err(dev, "Failed to reserve qp for mac registration\n"); + return err; + } + } + qp.qpn = *qpn; + + mac &= 0xffffffffffffULL; + mac = cpu_to_be64(mac << 16); + memcpy(&gid[10], &mac, ETH_ALEN); + gid[5] = port; + gid[7] = MLX4_UC_STEER 
<< 1; + + err = mlx4_qp_attach_common(dev, &qp, gid, 0, + MLX4_PROT_ETH, MLX4_UC_STEER); + if (err && reserve) + mlx4_qp_release_range(dev, *qpn, 1); + + return err; + } + + static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, + u64 mac, int qpn, u8 free) + { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = qpn; + mac &= 0xffffffffffffULL; + mac = cpu_to_be64(mac << 16); + memcpy(&gid[10], &mac, ETH_ALEN); + gid[5] = port; + gid[7] = MLX4_UC_STEER << 1; + + mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); + if (free) + mlx4_qp_release_range(dev, qpn, 1); + } + + int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + struct mlx4_mac_entry *entry; + int i, err = 0; + int free = -1; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); - if (!err) { - entry = kmalloc(sizeof *entry, GFP_KERNEL); - if (!entry) { - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return -ENOMEM; - } - entry->mac = mac; - err = radix_tree_insert(&info->mac_tree, *qpn, entry); - if (err) { - mlx4_uc_steer_release(dev, port, mac, *qpn, 1); - return err; - } - } else ++ if (err) + return err; ++ ++ entry = kmalloc(sizeof *entry, GFP_KERNEL); ++ if (!entry) { ++ mlx4_uc_steer_release(dev, port, mac, *qpn, 1); ++ return -ENOMEM; ++ } ++ ++ entry->mac = mac; ++ err = radix_tree_insert(&info->mac_tree, *qpn, entry); ++ if (err) { ++ kfree(entry); ++ mlx4_uc_steer_release(dev, port, mac, *qpn, 1); ++ return err; ++ } + } ++ + mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); ++ + mutex_lock(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { + if (free < 0 && !table->refs[i]) { + free = i; + continue; + } + + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + /* MAC already registered, increase references count */ + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + mlx4_dbg(dev, "Free MAC index is %d\n", free); + + if (table->total == table->max) { + /* No free mac entries */ + err = -ENOSPC; + goto out; + } + + /* Register new MAC */ + table->refs[free] = 1; + table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) + *qpn = info->base_qpn + free; + ++table->total; + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_register_mac); + + static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) + { + int err = 0; + + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; + } + return err; + } + + static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) + { + int i; + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; + } + + void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - 
info->base_qpn; + struct mlx4_mac_entry *entry; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (entry) { + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); + radix_tree_delete(&info->mac_tree, qpn); + index = find_index(dev, table, entry->mac); + kfree(entry); + } + } + + mutex_lock(&table->mutex); + + if (validate_index(dev, table, index)) + goto out; + + /* Check whether this address has reference count */ + if (!(--table->refs[index])) { + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; + } + out: + mutex_unlock(&table->mutex); + } + EXPORT_SYMBOL_GPL(mlx4_unregister_mac); + + int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) + { + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - info->base_qpn; + struct mlx4_mac_entry *entry; + int err; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + entry = radix_tree_lookup(&info->mac_tree, qpn); + if (!entry) + return -EINVAL; + index = find_index(dev, table, entry->mac); + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); + entry->mac = new_mac; + err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); + if (err || index < 0) + return err; + } + + mutex_lock(&table->mutex); + + err = validate_index(dev, table, index); + if (err) + goto out; + + table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); + table->entries[index] = 0; + } + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_replace_mac); + static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, + __be32 *entries) + { + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); + in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; + } + + int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i; + + for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { + if (table->refs[i] && + (vid == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* VLAN already registered, increase reference count */ + *idx = i; + return 0; + } + } + + return -ENOENT; + } + EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); + + int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i, err = 0; + int free = -1; + + mutex_lock(&table->mutex); + + if (table->total == table->max) { + /* No free vlan entries */ + err = -ENOSPC; + goto out; + } + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if (free < 0 && (table->refs[i] == 0)) { + free = i; + continue; + } + + if (table->refs[i] && + (vlan == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* Vlan already registered, increase references count */ + *index = i; + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + /* Register new MAC */ + table->refs[free] = 1; + 
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + *index = free; + ++table->total; + out: + mutex_unlock(&table->mutex); + return err; + } + EXPORT_SYMBOL_GPL(mlx4_register_vlan); + + void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) + { + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + + if (index < MLX4_VLAN_REGULAR) { + mlx4_warn(dev, "Trying to free special vlan index %d\n", index); + return; + } + + mutex_lock(&table->mutex); + if (!table->refs[index]) { + mlx4_warn(dev, "No vlan entry for index %d\n", index); + goto out; + } + if (--table->refs[index]) { + mlx4_dbg(dev, "Have more references for index %d," + "no need to modify vlan table\n", index); + goto out; + } + table->entries[index] = 0; + mlx4_set_port_vlan_table(dev, port, table->entries); + --table->total; + out: + mutex_unlock(&table->mutex); + } + EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); + + int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) + { + struct mlx4_cmd_mailbox *inmailbox, *outmailbox; + u8 *inbuf, *outbuf; + int err; + + inmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + + outmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outmailbox)) { + mlx4_free_cmd_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + inbuf = inmailbox->buf; + outbuf = outmailbox->buf; + memset(inbuf, 0, 256); + memset(outbuf, 0, 256); + inbuf[0] = 1; + inbuf[1] = 1; + inbuf[2] = 1; + inbuf[3] = 1; + *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); + *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); + + err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); + if (!err) + *caps = *(__be32 *) (outbuf + 84); + mlx4_free_cmd_mailbox(dev, inmailbox); + mlx4_free_cmd_mailbox(dev, outmailbox); + return err; + } + + int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) + { + struct mlx4_cmd_mailbox *mailbox; + int err; + + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memset(mailbox->buf, 0, 256); + + ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } diff --combined drivers/net/ethernet/mellanox/mlx4/qp.c index 51c5389,ec9350e..51c5389 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@@ -280,9 -280,6 +280,9 @@@ int mlx4_init_qp_table(struct mlx4_dev * We reserve 2 extra QPs per port for the special QPs. The * block of special QPs must be aligned to a multiple of 8, so * round up. + * + * We also reserve the MSB of the 24-bit QP number to indicate + * that a QP is an XRC QP. 
*/ dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); diff --combined drivers/net/ethernet/mellanox/mlx4/srq.c index a20b141,3b07b80..a20b141 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@@ -40,20 -40,20 +40,20 @@@ struct mlx4_srq_context { __be32 state_logsize_srqn; u8 logstride; - u8 reserved1[3]; - u8 pg_offset; - u8 reserved2[3]; - u32 reserved3; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; u8 log_page_size; - u8 reserved4[2]; + u8 reserved3[2]; u8 mtt_base_addr_h; __be32 mtt_base_addr_l; __be32 pd; __be16 limit_watermark; __be16 wqe_cnt; - u16 reserved5; + u16 reserved4; __be16 wqe_counter; - u32 reserved6; + u32 reserved5; __be64 db_rec_addr; };
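The reworked mlx4_srq_context above adds an explicit __be16 xrcd word and folds the CQN into pg_offset_cqn; the later mlx4_srq_alloc() hunk fills them with cpu_to_be16() and cpu_to_be32(cqn & 0xffffff). A minimal illustration of that host-to-big-endian pattern on a made-up context struct (not the driver's real layout):

	struct hw_srq_ctx {		/* illustrative only */
		__be16 xrcd;
		__be32 pg_offset_cqn;
	};

	static void fill_srq_ctx(struct hw_srq_ctx *ctx, u16 xrcd, u32 cqn)
	{
		ctx->xrcd = cpu_to_be16(xrcd);
		/* Only the low 24 bits of the word carry the CQN. */
		ctx->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
	}
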
@@@ -109,8 -109,8 +109,8 @@@ static int mlx4_QUERY_SRQ(struct mlx4_d MLX4_CMD_TIME_CLASS_A); }
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, - u64 db_rec, struct mlx4_srq *srq) +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; struct mlx4_cmd_mailbox *mailbox; @@@ -148,8 -148,6 +148,8 @@@ srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | srq->srqn); srq_context->logstride = srq->wqe_shift - 4; + srq_context->xrcd = cpu_to_be16(xrcd); + srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt); diff --combined drivers/net/ethernet/realtek/r8169.c index 0000000,aa39e77..92b45f0 mode 000000,100644..100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -1,0 -1,6219 +1,6243 @@@ + /* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen shuchen@realtek.com.tw + * Copyright (c) 2003 - 2007 Francois Romieu romieu@fr.zoreil.com + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + + #include <linux/module.h> + #include <linux/moduleparam.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/delay.h> + #include <linux/ethtool.h> + #include <linux/mii.h> + #include <linux/if_vlan.h> + #include <linux/crc32.h> + #include <linux/in.h> + #include <linux/ip.h> + #include <linux/tcp.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/dma-mapping.h> + #include <linux/pm_runtime.h> + #include <linux/firmware.h> + #include <linux/pci-aspm.h> + #include <linux/prefetch.h> + + #include <asm/system.h> + #include <asm/io.h> + #include <asm/irq.h> + + #define RTL8169_VERSION "2.3LK-NAPI" + #define MODULENAME "r8169" + #define PFX MODULENAME ": " + + #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" + #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" + #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" + #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" + #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" + #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" + #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" + #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" + + #ifdef RTL8169_DEBUG + #define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } + #define dprintk(fmt, args...) \ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) + #else + #define assert(expr) do {} while (0) + #define dprintk(fmt, args...) do {} while (0) + #endif /* RTL8169_DEBUG */ + + #define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + + #define TX_BUFFS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) + + /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ + static const int multicast_filter_limit = 32; + + /* MAC address length */ + #define MAC_ADDR_LEN 6 + + #define MAX_READ_REQUEST_SHIFT 12 + #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ + #define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ + #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + + #define R8169_REGS_SIZE 256 + #define R8169_NAPI_WEIGHT 64 + #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ + #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ + #define RX_BUF_SIZE 1536 /* Rx Buffer size */ + #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) + #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + + #define RTL8169_TX_TIMEOUT (6*HZ) + #define RTL8169_PHY_TIMEOUT (10*HZ) + + #define RTL_EEPROM_SIG cpu_to_le32(0x8129) + #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) + #define RTL_EEPROM_SIG_ADDR 0x0000 + + /* write/read MMIO register */ + #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) + #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) + #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) + #define RTL_R8(reg) readb (ioaddr + (reg)) + #define RTL_R16(reg) readw (ioaddr + (reg)) + #define RTL_R32(reg) readl (ioaddr + (reg)) + + enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_NONE = 0xff, + }; + + enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, + }; + + #define JUMBO_1K ETH_DATA_LEN + #define JUMBO_4K (4*1024 - ETH_HLEN - 2) + #define JUMBO_6K (6*1024 - ETH_HLEN - 2) + #define JUMBO_7K (7*1024 - ETH_HLEN - 2) + #define JUMBO_9K (9*1024 - ETH_HLEN - 2) + + #define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ + } + + static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; + } rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), + }; + #undef _R + + enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 + }; + + static void rtl_hw_start_8169(struct net_device *); + static void rtl_hw_start_8168(struct net_device *); + static void rtl_hw_start_8101(struct net_device *); + + static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 
0x2410, 0, 0, RTL_CFG_2 }, + {0,}, + }; + + MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + + static int rx_buf_sz = 16383; + static int use_dac; + static struct { + u32 msg_enable; + } debug = { -1 }; + + enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, + #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ + #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, + #define RX128_INT_EN (1 << 15) /* 8111c and later */ + #define RX_MULTI_EN (1 << 14) /* 8111c only */ + #define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ + #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) + #define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ + #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + + #define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + + #define TxPacketMax (8064 >> 7) + #define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + FuncForceEvent = 0xfc, + }; + + enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, + }; + + enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, + #define CSIAR_FLAG 0x80000000 + #define CSIAR_WRITE_CMD 0x80000000 + #define CSIAR_BYTE_ENABLE 0x0f + #define CSIAR_BYTE_ENABLE_SHIFT 12 + #define CSIAR_ADDR_MASK 0x0fff + PMCH = 0x6f, + EPHYAR = 0x80, + #define EPHYAR_FLAG 0x80000000 + #define EPHYAR_WRITE_CMD 0x80000000 + #define EPHYAR_REG_MASK 0x1f + #define EPHYAR_REG_SHIFT 16 + #define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, + #define PFM_EN (1 << 6) + DBG_REG = 0xd1, + #define FIX_NAK_1 (1 << 4) + #define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, + #define NOW_IS_OOB (1 << 7) + #define EN_NDP (1 << 3) + #define EN_OOB_RESET (1 << 2) + EFUSEAR = 0xdc, + #define EFUSEAR_FLAG 0x80000000 + #define EFUSEAR_WRITE_CMD 0x80000000 + #define EFUSEAR_READ_CMD 0x00000000 + #define EFUSEAR_REG_MASK 0x03ff + #define EFUSEAR_REG_SHIFT 8 + #define EFUSEAR_DATA_MASK 0xff + }; + + enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, + #define ERIAR_FLAG 0x80000000 + #define ERIAR_WRITE_CMD 0x80000000 + #define ERIAR_READ_CMD 0x00000000 + #define ERIAR_ADDR_BYTE_ALIGN 4 + #define ERIAR_TYPE_SHIFT 16 + #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) + #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) + #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) + #define ERIAR_MASK_SHIFT 12 + #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) + #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) + #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ + #define OCPDR_WRITE_CMD 0x80000000 + #define OCPDR_READ_CMD 0x00000000 + #define OCPDR_REG_MASK 0x7f + #define OCPDR_GPHY_REG_SHIFT 16 + #define OCPDR_DATA_MASK 0xffff + 
OCPAR = 0xb4, + #define OCPAR_FLAG 0x80000000 + #define OCPAR_GPHY_WRITE_CMD 0x8000f060 + #define OCPAR_GPHY_READ_CMD 0x0000f060 + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ + #define TXPLA_RST (1 << 29) + #define PWM_EN (1 << 22) + }; + + enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, + #define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* DumpCounterCommand */ + CounterDump = 0x8, + }; + + enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ + }; + + /* Generic case. */ + enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ + #define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ + }; + + /* 8169, 8168b and 810x except 8102e. */ + enum rtl_tx_desc_bit_0 { + /* First doubleword. */ + #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ + }; + + /* 8102e, 8168c and beyond. */ + enum rtl_tx_desc_bit_1 { + /* Second doubleword. 
*/ + #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IP_CS = (1 << 29), /* Calculate IP checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ + }; + + static const struct rtl_tx_desc_info { + struct { + u32 udp; + u32 tcp; + } checksum; + u16 mss_shift; + u16 opts_offset; + } tx_desc_info [] = { + [RTL_TD_0] = { + .checksum = { + .udp = TD0_IP_CS | TD0_UDP_CS, + .tcp = TD0_IP_CS | TD0_TCP_CS + }, + .mss_shift = TD0_MSS_SHIFT, + .opts_offset = 0 + }, + [RTL_TD_1] = { + .checksum = { + .udp = TD1_IP_CS | TD1_UDP_CS, + .tcp = TD1_IP_CS | TD1_TCP_CS + }, + .mss_shift = TD1_MSS_SHIFT, + .opts_offset = 1 + } + }; + + enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + + #define RxProtoUDP (PID1) + #define RxProtoTCP (PID0) + #define RxProtoIP (PID1 | PID0) + #define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ + }; + + #define RsvdMask 0x3fffc000 + + struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; + }; + + struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; + }; + + struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; + }; + + enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), + }; + + struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; + }; + + struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + spinlock_t lock; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_rx; + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + u16 intr_event; + u16 napi_event; + u16 intr_mask; + + struct mdio_ops { + void (*write)(void __iomem *, int, int); + int (*read)(void __iomem *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + struct delayed_work task; + unsigned features; + + struct mii_if_info mii; + struct rtl8169_counters counters; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + + #define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; + #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + }; + + MODULE_AUTHOR("Realtek and the Linux r8169 crew netdev@vger.kernel.org"); + MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); + module_param(use_dac, int, 0); + MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); + module_param_named(debug, debug.msg_enable, int, 0); + MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(RTL8169_VERSION); + MODULE_FIRMWARE(FIRMWARE_8168D_1); + MODULE_FIRMWARE(FIRMWARE_8168D_2); + MODULE_FIRMWARE(FIRMWARE_8168E_1); + MODULE_FIRMWARE(FIRMWARE_8168E_2); + MODULE_FIRMWARE(FIRMWARE_8168E_3); + MODULE_FIRMWARE(FIRMWARE_8105E_1); + MODULE_FIRMWARE(FIRMWARE_8168F_1); + MODULE_FIRMWARE(FIRMWARE_8168F_2); + + static int rtl8169_open(struct net_device *dev); + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); + static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); + static int rtl8169_init_ring(struct net_device *dev); + static void rtl_hw_start(struct net_device *dev); + static int rtl8169_close(struct net_device *dev); + static void rtl_set_rx_mode(struct net_device *dev); + static void rtl8169_tx_timeout(struct net_device *dev); + static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); + static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, + void __iomem *, u32 budget); + static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); + static void rtl8169_down(struct net_device *dev); + static void rtl8169_rx_clear(struct rtl8169_private *tp); + static int rtl8169_poll(struct napi_struct *napi, int budget); + + static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) + { + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); + ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + } + } + + static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + return RTL_R32(OCPDR); + } + + static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) + break; + } + } + + static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W8(ERIDR, cmd); + RTL_W32(ERIAR, 0x800010e8); + msleep(2); + for (i = 0; i < 5; i++) { + udelay(100); + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + } + + ocp_write(tp, 0x1, 0x30, 0x00000001); + } + + #define OOB_CMD_RESET 0x00 + #define OOB_CMD_DRIVER_START 0x05 + #define OOB_CMD_DRIVER_STOP 0x06 + + static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) + { + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; + } + + static void rtl8168_driver_start(struct rtl8169_private *tp) + { + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if (ocp_read(tp, 0x0f, reg) & 0x00000800) + break; + } + } + + static void rtl8168_driver_stop(struct rtl8169_private *tp) + { + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0) + break; + } + } + + static int r8168dp_check_dash(struct rtl8169_private *tp) + { + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; + } + + static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + int i; + + RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed writing to the specified + * MII register. + */ + if (!(RTL_R32(PHYAR) & 0x80000000)) + break; + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); + } + + static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int i, value = -1; + + RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed retrieving data from + * the specified MII register. + */ + if (RTL_R32(PHYAR) & 0x80000000) { + value = RTL_R32(PHYAR) & 0xffff; + break; + } + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; + } + + static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) + { + int i; + + RTL_W32(OCPDR, data | + ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(OCPAR) & OCPAR_FLAG)) + break; + } + } + + static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | + (value & OCPDR_DATA_MASK)); + } + + static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int i; + + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + + return RTL_R32(OCPDR) & OCPDR_DATA_MASK; + } + + #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + + static void r8168dp_2_mdio_start(void __iomem *ioaddr) + { + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); + } + + static void r8168dp_2_mdio_stop(void __iomem *ioaddr) + { + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); + } + + static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) + { + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(ioaddr, reg_addr, value); + + r8168dp_2_mdio_stop(ioaddr); + } + + static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) + { + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(ioaddr, reg_addr); + + r8168dp_2_mdio_stop(ioaddr); + + return value; + } + + static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) + { + 
tp->mdio_ops.write(tp->mmio_addr, location, val); + } + + static int rtl_readphy(struct rtl8169_private *tp, int location) + { + return tp->mdio_ops.read(tp->mmio_addr, location); + } + + static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) + { + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); + } + + static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) + { + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val | p) & ~m); + } + + static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) + { + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); + } + + static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); + } + + static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) + { + unsigned int i; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) + break; + udelay(10); + } + } + + static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) + { + u16 value = 0xffff; + unsigned int i; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { + value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK; + break; + } + udelay(10); + } + + return value; + } + + static void rtl_csi_write(void __iomem *ioaddr, int addr, int value) + { + unsigned int i; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) + break; + udelay(10); + } + } + + static u32 rtl_csi_read(void __iomem *ioaddr, int addr) + { + u32 value = ~0x00; + unsigned int i; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(CSIAR) & CSIAR_FLAG) { + value = RTL_R32(CSIDR); + break; + } + udelay(10); + } + + return value; + } + + static + void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) + { + unsigned int i; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + udelay(100); + } + } + + static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) + { + u32 value = ~0x00; + unsigned int i; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + for (i = 0; i < 100; i++) { + if (RTL_R32(ERIAR) & ERIAR_FLAG) { + value = RTL_R32(ERIDR); + break; + } + udelay(100); + } + + return value; + } + + static void + rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) + { + u32 val; + + val = rtl_eri_read(ioaddr, addr, type); + rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); + } + + struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; + }; + + static void rtl_write_exgmac_batch(void __iomem *ioaddr, + const struct exgmac_reg *r, int len) + { + while (len-- > 0) { + rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } + } + + static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) + { + u8 value = 0xff; + unsigned int i; + + RTL_W32(EFUSEAR, (reg_addr & 
EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + for (i = 0; i < 300; i++) { + if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { + value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; + break; + } + udelay(100); + } + + return value; + } + + static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) + { + RTL_W16(IntrMask, 0x0000); + + RTL_W16(IntrStatus, 0xffff); + } + + static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; + } + + static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) + { + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; + } + + static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) + { + return RTL_R32(TBICSR) & TBILinkOk; + } + + static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) + { + return RTL_R8(PHYstatus) & LinkStatus; + } + + static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); + } + + static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) + { + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); + } + + static void rtl_link_chg_patch(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + } + } + + static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) + { + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. 
*/ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 100); + } + spin_unlock_irqrestore(&tp->lock, flags); + } + + static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) + { + __rtl8169_check_link_status(dev, tp, ioaddr, false); + } + + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + + static u32 __rtl8169_get_wol(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; + } + + static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + spin_unlock_irq(&tp->lock); + } + + static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) + { + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_ANY, Config1, PMEnable }, + { WAKE_PHY, Config3, LinkUp }, + { WAKE_MAGIC, Config3, MagicPacket }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake } + }; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + for (i = 0; i < ARRAY_SIZE(cfg); i++) { + u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + } + + static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + spin_unlock_irq(&tp->lock); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; + } + + static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) + { + return rtl_chip_infos[tp->mac_version].fw_name; + } + + static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8169_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? 
"N/A" : + rtl_fw->version); + } + + static int rtl8169_get_regs_len(struct net_device *dev) + { + return R8169_REGS_SIZE; + } + + static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; + } + + static int rtl8169_set_speed_xmii(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 adv) + { + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. */ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; + out: + return rc; + } + + static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) + { + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } + out: + return ret; + } + + static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int ret; + + del_timer_sync(&tp->timer); + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, 
cmd->advertising); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; + } + + static u32 rtl8169_fix_features(struct net_device *dev, u32 features) + { + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; + } + + static int rtl8169_set_features(struct net_device *dev, u32 features) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; + } + + static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, + struct sk_buff *skb) + { + return (vlan_tx_tag_present(skb)) ? + TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; + } + + static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) + { + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + + desc->opts2 = 0; + } + + static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; + } + + static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); + } + + static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&tp->lock, flags); + + rc = tp->get_settings(dev, cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + return rc; + } + + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + + if (regs->len > R8169_REGS_SIZE) + regs->len = R8169_REGS_SIZE; + + spin_lock_irqsave(&tp->lock, flags); + memcpy_fromio(p, tp->mmio_addr, regs->len); + spin_unlock_irqrestore(&tp->lock, flags); + } + + static u32 rtl8169_get_msglevel(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; + } + + static void rtl8169_set_msglevel(struct net_device *dev, u32 value) + { + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; + } + + static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", + }; + + static int rtl8169_get_sset_count(struct net_device *dev, int sset) + { + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } + } + + static void rtl8169_update_counters(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + struct rtl8169_counters *counters; + dma_addr_t paddr; + u32 cmd; + int wait = 1000; + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. 
+ */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return; + + counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); + if (!counters) + return; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | CounterDump); + + while (wait--) { + if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { + memcpy(&tp->counters, counters, sizeof(*counters)); + break; + } + udelay(10); + } + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + dma_free_coherent(d, sizeof(*counters), counters, paddr); + } + + static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) + { + struct rtl8169_private *tp = netdev_priv(dev); + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(tp->counters.tx_packets); + data[1] = le64_to_cpu(tp->counters.rx_packets); + data[2] = le64_to_cpu(tp->counters.tx_errors); + data[3] = le32_to_cpu(tp->counters.rx_errors); + data[4] = le16_to_cpu(tp->counters.rx_missed); + data[5] = le16_to_cpu(tp->counters.align_errors); + data[6] = le32_to_cpu(tp->counters.tx_one_collision); + data[7] = le32_to_cpu(tp->counters.tx_multi_collision); + data[8] = le64_to_cpu(tp->counters.rx_unicast); + data[9] = le64_to_cpu(tp->counters.rx_broadcast); + data[10] = le32_to_cpu(tp->counters.rx_multicast); + data[11] = le16_to_cpu(tp->counters.tx_aborted); + data[12] = le16_to_cpu(tp->counters.tx_underun); + } + + static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) + { + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } + } + + static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + }; + + static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) + { + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168F family. */ + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. 
*/ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. 
*/ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } + } + + static void rtl8169_print_mac_version(struct rtl8169_private *tp) + { + dprintk("mac_version = 0x%02x\n", tp->mac_version); + } + + struct phy_reg { + u16 reg; + u16 val; + }; + + static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) + { + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } + } + + #define PHY_READ 0x00000000 + #define PHY_DATA_OR 0x10000000 + #define PHY_DATA_AND 0x20000000 + #define PHY_BJMPN 0x30000000 + #define PHY_READ_EFUSE 0x40000000 + #define PHY_READ_MAC_BYTE 0x50000000 + #define PHY_WRITE_MAC_BYTE 0x60000000 + #define PHY_CLEAR_READCOUNT 0x70000000 + #define PHY_WRITE 0x80000000 + #define PHY_READCOUNT_EQ_SKIP 0x90000000 + #define PHY_COMP_EQ_SKIPN 0xa0000000 + #define PHY_COMP_NEQ_SKIPN 0xb0000000 + #define PHY_WRITE_PREVIOUS 0xc0000000 + #define PHY_SKIPN 0xd0000000 + #define PHY_DELAY_MS 0xe0000000 + #define PHY_WRITE_ERI_WORD 0xf0000000 + + struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; + } __packed; + + #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + + static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; + out: + return rc; + } + + static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) + { + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_READ_EFUSE: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + 
netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); + goto out; + } + } + rc = true; + out: + return rc; + } + + static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firwmare\n"); + goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; + out: + return rc; + } + + static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) + { + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + u32 predata, count; + size_t index; + + predata = count = 0; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_READ_EFUSE: + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + BUG(); + } + } + } + + static void rtl_release_firmware(struct rtl8169_private *tp) + { + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + } + + static void rtl_apply_firmware(struct rtl8169_private *tp) + { + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); + } + + static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) + { + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); + } + + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) + { + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); + } + + static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 
0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + 
rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) + { + rtl8168c_3_hw_phy_config(tp); + } + + static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + void __iomem *ioaddr = tp->mmio_addr; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + void __iomem *ioaddr = tp->mmio_addr; + + rtl_writephy_batch(tp, phy_reg_init_0, 
ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); + } + + static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* 
DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0023); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + } + + static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, + ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); ++ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void 
rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) + { + rtl_apply_firmware(tp); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + } + + static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) + { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + static void rtl_hw_phy_config(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + 
rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. */ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + default: + break; + } + } + + static void rtl8169_phy_timer(unsigned long __opaque) + { + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8169_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->timer; + void __iomem *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; + + assert(tp->mac_version > RTL_GIGA_MAC_VER_01); + + spin_lock_irq(&tp->lock); + + if (tp->phy_reset_pending(tp)) { + /* + * A busy loop could burn quite a few cycles on nowadays CPU. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; + } + + if (tp->link_ok(ioaddr)) + goto out_unlock; + + netif_warn(tp, link, dev, "PHY reset until link up\n"); + + tp->phy_reset_enable(tp); + + out_mod_timer: + mod_timer(timer, jiffies + timeout); + out_unlock: + spin_unlock_irq(&tp->lock); + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + /* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ + static void rtl8169_netpoll(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + disable_irq(pdev->irq); + rtl8169_interrupt(pdev->irq, dev); + enable_irq(pdev->irq); + } + #endif + + static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) + { + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); + } + + static void rtl8169_phy_reset(struct net_device *dev, + struct rtl8169_private *tp) + { + unsigned int i; + + tp->phy_reset_enable(tp); + for (i = 0; i < 100; i++) { + if (!tp->phy_reset_pending(tp)) + return; + msleep(1); + } + netif_err(tp, link, dev, "PHY reset failed\n"); + } + + static bool rtl_tbi_enabled(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(PHYstatus) & TBI_Enable); + } + + static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + rtl8169_phy_reset(dev, tp); + + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0)); + + if (rtl_tbi_enabled(tp)) + netif_info(tp, link, dev, "TBI auto-negotiating\n"); + } + + static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) + { + void __iomem *ioaddr = tp->mmio_addr; + u32 high; + u32 low; + + low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); + high = addr[4] | (addr[5] << 8); + + spin_lock_irq(&tp->lock); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W32(MAC4, high); + RTL_R32(MAC4); + + RTL_W32(MAC0, low); + RTL_R32(MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = low }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = high }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 | + low >> 16 }, + }; + + rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&tp->lock); + } + + static int rtl_set_mac_address(struct net_device *dev, void *p) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl_rar_set(tp, dev->dev_addr); + + return 0; + } + + static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? 
tp->do_ioctl(tp, data, cmd) : -ENODEV; + } + + static int rtl_xmii_ioctl(struct rtl8169_private *tp, + struct mii_ioctl_data *data, int cmd) + { + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + return 0; + + case SIOCGMIIREG: + data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: + rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; + } + + static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) + { + return -EOPNOTSUPP; + } + + static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 intr_event; + u16 napi_event; + unsigned features; + u8 default_ver; + } rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .intr_event = SYSErr | LinkChg | RxOverflow | + RxFIFOOver | TxErr | TxOK | RxOK | RxErr, + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .intr_event = SYSErr | LinkChg | RxOverflow | + TxErr | TxOK | RxOK | RxErr, + .napi_event = TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | + RxFIFOOver | TxErr | TxOK | RxOK | RxErr, + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } + }; + + /* Cfg9346_Unlock assumed. */ + static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, + const struct rtl_cfg_info *cfg) + { + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(pdev)) { + dev_info(&pdev->dev, "no MSI. 
Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + RTL_W8(Config2, cfg2); + return msi; + } + + static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) + { + if (tp->features & RTL_FEATURE_MSI) { + pci_disable_msi(pdev); + tp->features &= ~RTL_FEATURE_MSI; + } + } + + static const struct net_device_ops rtl8169_netdev_ops = { + .ndo_open = rtl8169_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats = rtl8169_get_stats, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, + #endif + + }; + + static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) + { + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } + } + ++static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) ++{ ++ void __iomem *ioaddr = tp->mmio_addr; ++ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_29: ++ case RTL_GIGA_MAC_VER_30: ++ case RTL_GIGA_MAC_VER_32: ++ case RTL_GIGA_MAC_VER_33: ++ case RTL_GIGA_MAC_VER_34: ++ RTL_W32(RxConfig, RTL_R32(RxConfig) | ++ AcceptBroadcast | AcceptMulticast | AcceptMyPhys); ++ break; ++ default: ++ break; ++ } ++} ++ ++static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) ++{ ++ if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) ++ return false; ++ ++ rtl_writephy(tp, 0x1f, 0x0000); ++ rtl_writephy(tp, MII_BMCR, 0x0000); ++ ++ rtl_wol_suspend_quirk(tp); ++ ++ return true; ++} ++ + static void r810x_phy_power_down(struct rtl8169_private *tp) + { + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + } + + static void r810x_phy_power_up(struct rtl8169_private *tp) + { + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); + } + + static void r810x_pll_power_down(struct rtl8169_private *tp) + { - void __iomem *ioaddr = tp->mmio_addr; - - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_29 || - tp->mac_version == RTL_GIGA_MAC_VER_30) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); ++ if (rtl_wol_pll_power_down(tp)) + return; - } + + r810x_phy_power_down(tp); + } + + static void r810x_pll_power_up(struct rtl8169_private *tp) + { + r810x_phy_power_up(tp); + } + + static void r8168_phy_power_up(struct rtl8169_private *tp) + { + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case 
RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0000); + break; + default: + break; + } + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); + } + + static void r8168_phy_power_down(struct rtl8169_private *tp) + { + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0200); + default: + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + break; + } + } + + static void r8168_pll_power_down(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) && + r8168dp_check_dash(tp)) { + return; + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || + tp->mac_version == RTL_GIGA_MAC_VER_24) && + (RTL_R16(CPlusCmd) & ASF)) { + return; + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(ioaddr, 0x19, 0xff64); + - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33 || - tp->mac_version == RTL_GIGA_MAC_VER_34) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); ++ if (rtl_wol_pll_power_down(tp)) + return; - } + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } + } + + static void r8168_pll_power_up(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) && + r8168dp_check_dash(tp)) { + return; + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } + + r8168_phy_power_up(tp); + } + + static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) + { + if (op) + op(tp); + } + + static void rtl_pll_power_down(struct rtl8169_private *tp) + { + rtl_generic_op(tp, tp->pll_power_ops.down); + } + + static void rtl_pll_power_up(struct rtl8169_private *tp) + { + rtl_generic_op(tp, tp->pll_power_ops.up); + } + + static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) + { + struct pll_power_ops *ops = &tp->pll_power_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + ops->down = r810x_pll_power_down; + ops->up = 
r810x_pll_power_up; + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; + break; + + default: + ops->down = NULL; + ops->up = NULL; + break; + } + } + + static void rtl_init_rxcfg(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + default: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } + } + + static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) + { + tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; + } + + static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) + { + rtl_generic_op(tp, tp->jumbo_ops.enable); + } + + static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) + { + rtl_generic_op(tp, tp->jumbo_ops.disable); + } + + static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); + } + + static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + } + + static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + } + + static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + } + + static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + pci_write_config_byte(pdev, 0x79, 0x20); + } + + static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + 
pci_write_config_byte(pdev, 0x79, 0x50); + } + + static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) + { + rtl_tx_performance_tweak(tp->pci_dev, + (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + } + + static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) + { + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + } + + static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); + } + + static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + } + + static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) + { + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. + */ + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } + } + + static void rtl_hw_reset(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + int i; + + /* Soft reset the chip. */ + RTL_W8(ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. 
*/ + for (i = 0; i < 100; i++) { + if ((RTL_R8(ChipCmd) & CmdReset) == 0) + break; + udelay(100); + } + + rtl8169_init_ring_indexes(tp); + } + + static int __devinit + rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + { + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + if (netif_msg_drv(&debug)) + dev_err(&pdev->dev, "unable to alloc new ethernet\n"); + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl8169_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = RxChkSum; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + RTL_W16(IntrMask, 0x0000); + + rtl_hw_reset(tp); + + RTL_W16(IntrStatus, 0xffff); + + pci_set_master(pdev); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(pdev, ioaddr, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + spin_lock_init(&tp->lock); + + /* Get MAC address */ + for (i = 0; i < MAC_ADDR_LEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + dev->irq = pdev->irq; + dev->base_addr = (unsigned long) ioaddr; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_RX; + + tp->intr_mask = 0xffff; + tp->hw_start = cfg->hw_start; + tp->intr_event = cfg->intr_event; + tp->napi_event = cfg->napi_event; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + + out: + return rc; + + err_out_msi_4: + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); + err_out_free_res_3: + pci_release_regions(pdev); + err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); + err_out_free_dev_1: + free_netdev(dev); + goto out; + } + + static void __devexit rtl8169_remove_one(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_stop(tp); + } + + cancel_delayed_work_sync(&tp->task); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); + pci_set_drvdata(pdev, NULL); + } + + static void rtl_request_uncached_firmware(struct rtl8169_private *tp) + { + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; + out: + return; + + err_release_firmware: + release_firmware(rtl_fw->fw); + err_free: + kfree(rtl_fw); + err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); + out_no_firmware: + tp->rtl_fw = NULL; + goto out; + } + + static void rtl_request_firmware(struct rtl8169_private *tp) + { + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); + } + + static int rtl8169_open(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx desscriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; + + INIT_DELAYED_WORK(&tp->task, NULL); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(dev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 
0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); + out: + return retval; + + err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); + err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; + err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; + } + + static void rtl_rx_close(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); + } + + static void rtl8169_hw_reset(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(ioaddr); + + rtl_rx_close(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + while (RTL_R8(TxPoll) & NPQ) + udelay(20); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) + udelay(100); + } else { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + udelay(100); + } + + rtl_hw_reset(tp); + } + + static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) + { + void __iomem *ioaddr = tp->mmio_addr; + + /* Set DMA burst size and Interframe Gap Time */ + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + } + + static void rtl_hw_start(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + tp->hw_start(dev); + + netif_start_queue(dev); + } + + static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, + void __iomem *ioaddr) + { + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); + } + + static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) + { + u16 cmd; + + cmd = RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, cmd); + return cmd; + } + + static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) + { + /* Low hurts. Let's disable the filtering. 
*/ + RTL_W16(RxMaxSize, rx_buf_sz + 1); + } + + static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) + { + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(0x7c, p->val); + break; + } + } + } + + static void rtl_hw_start_8169(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) { + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_init_rxcfg(tp); + + RTL_W8(EarlyTxThres, NoEarlyTx); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + rtl_set_rx_tx_config_registers(tp); + + tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + dprintk("Set MAC Reg C+CR Offset 0xE0. " + "Bit-3 and bit-14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(ioaddr, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + if (tp->mac_version != RTL_GIGA_MAC_VER_01 && + tp->mac_version != RTL_GIGA_MAC_VER_02 && + tp->mac_version != RTL_GIGA_MAC_VER_03 && + tp->mac_version != RTL_GIGA_MAC_VER_04) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(IntrMask); + + RTL_W32(RxMissed, 0); + + rtl_set_rx_mode(dev); + + /* no early-rx interrupts */ + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + + /* Enable all known interrupts by setting the interrupt mask. 
*/ + RTL_W16(IntrMask, tp->intr_event); + } + + static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) + { + u32 csi; + + csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; + rtl_csi_write(ioaddr, 0x070c, csi | bits); + } + + static void rtl_csi_access_enable_1(void __iomem *ioaddr) + { + rtl_csi_access_enable(ioaddr, 0x17000000); + } + + static void rtl_csi_access_enable_2(void __iomem *ioaddr) + { + rtl_csi_access_enable(ioaddr, 0x27000000); + } + + struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; + }; + + static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len) + { + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(ioaddr, e->offset, w); + e++; + } + } + + static void rtl_disable_clock_request(struct pci_dev *pdev) + { + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); + ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); + } + } + + static void rtl_enable_clock_request(struct pci_dev *pdev) + { + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); + ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); + } + } + + #define R8168_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + + static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) + { + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + + rtl_tx_performance_tweak(pdev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + } + + static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_hw_start_8168bb(ioaddr, pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + } + + static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) + { + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_disable_clock_request(pdev); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + } + + static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(ioaddr, pdev); + } + + static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + } + + static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(DBG_REG, 0x20); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + } + + static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(ioaddr, pdev); + } + + static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(ioaddr, pdev); + } + + static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_hw_start_8168c_2(ioaddr, pdev); + } + + static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_2(ioaddr); + + __rtl_hw_start_8168cp(ioaddr, pdev); + } + + static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_2(ioaddr); + + rtl_disable_clock_request(pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + } + + static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_1(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + } + + static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, ~0, 0x48 }, + { 0x19, 0x20, 0x50 }, + { 0x0c, ~0, 0x20 } + }; + int i; + + rtl_csi_access_enable_1(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { + const struct ephy_info *e = e_info_8168d_4 + i; + u16 w; + + w = rtl_ephy_read(ioaddr, e->offset); + rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits); + } + + rtl_enable_clock_request(pdev); + } + + static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + + /* Reset tx FIFO pointer */ + RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); + RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + } + + static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info 
e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, + ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + } + + static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, + ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + } + + static void rtl_hw_start_8168(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x5151); + + /* Work around for RxFIFO overflow. 
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { + tp->intr_event |= RxFIFOOver | PCSTimeout; + tp->intr_event &= ~RxOverflow; + } + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_mode(dev); + + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + + RTL_R8(IntrMask); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(ioaddr, pdev); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(ioaddr, pdev); + break; + + default: + printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", + dev->name, tp->mac_version); + break; + } + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + + RTL_W16(IntrMask, tp->intr_event); + } + + #define R810X_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + + static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + cfg1 = RTL_R8(Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); + } + + static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_csi_access_enable_2(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + } + + static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_hw_start_8102e_2(ioaddr, pdev); + + rtl_ephy_write(ioaddr, 0x03, 0xc2f9); + } + + static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) + { + static const struct ephy_info 
e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + + rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + } + + static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) + { + rtl_hw_start_8105e_1(ioaddr, pdev); + rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); + } + + static void rtl_hw_start_8101(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) { + int cap = pci_pcie_cap(pdev); + + if (cap) { + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + } + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(ioaddr, pdev); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(ioaddr, pdev); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + + RTL_R8(IntrMask); + + rtl_set_rx_mode(dev); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + + RTL_W16(IntrMask, tp->intr_event); + } + + static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) + { + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) + return -EINVAL; + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; + } + + static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) + { + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); + } + + static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) + { + dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); + } + + static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) + { + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); + } + + static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) + { + desc->addr = cpu_to_le64(mapping); + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); + } + + static inline void *rtl8169_align(void *data) + { + return (void *)ALIGN((long)data, 16); + } + + static struct sk_buff 
*rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) + { + void *data; + dma_addr_t mapping; + struct device *d = &tp->pci_dev->dev; + struct net_device *dev = tp->dev; + int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; + + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + return data; + + err_out: + kfree(data); + return NULL; + } + + static void rtl8169_rx_clear(struct rtl8169_private *tp) + { + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } + } + + static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) + { + desc->opts1 |= cpu_to_le32(RingEnd); + } + + static int rtl8169_rx_fill(struct rtl8169_private *tp) + { + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + if (tp->Rx_databuff[i]) + continue; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + + err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; + } + + static int rtl8169_init_ring(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + + return rtl8169_rx_fill(tp); + } + + static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) + { + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; + } + + static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) + { + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; + dev_kfree_skb(skb); + tx_skb->skb = NULL; + } + } + } + } + + static void rtl8169_tx_clear(struct rtl8169_private *tp) + { + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; + } + + static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) + { + struct rtl8169_private *tp = netdev_priv(dev); + + PREPARE_DELAYED_WORK(&tp->task, task); + schedule_delayed_work(&tp->task, 4); + } + + static void rtl8169_wait_for_quiescence(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + synchronize_irq(dev->irq); + + /* Wait for any pending NAPI task to complete */ + napi_disable(&tp->napi); + + rtl8169_irq_mask_and_ack(ioaddr); + + tp->intr_mask = 0xffff; + 
RTL_W16(IntrMask, tp->intr_event); + napi_enable(&tp->napi); + } + + static void rtl8169_reinit_task(struct work_struct *work) + { + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, task.work); + struct net_device *dev = tp->dev; + int ret; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + rtl8169_wait_for_quiescence(dev); + rtl8169_close(dev); + + ret = rtl8169_open(dev); + if (unlikely(ret < 0)) { + if (net_ratelimit()) + netif_err(tp, drv, dev, + "reinit failure (status = %d). Rescheduling\n", + ret); + rtl8169_schedule_work(dev, rtl8169_reinit_task); + } + + out_unlock: + rtnl_unlock(); + } + + static void rtl8169_reset_task(struct work_struct *work) + { + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, task.work); + struct net_device *dev = tp->dev; + int i; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + rtl8169_wait_for_quiescence(dev); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + + rtl8169_tx_clear(tp); + + rtl8169_hw_reset(tp); + rtl_hw_start(dev); + netif_wake_queue(dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); + + out_unlock: + rtnl_unlock(); + } + + static void rtl8169_tx_timeout(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_hw_reset(tp); + + /* Let's wait a bit while any (async) irq lands on */ + rtl8169_schedule_work(dev, rtl8169_reset_task); + } + + static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) + { + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc * uninitialized_var(txd); + struct device *d = &tp->pci_dev->dev; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + + err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; + } + + static inline void rtl8169_tso_csum(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) + { + const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; + u32 mss = skb_shinfo(skb)->gso_size; + int offset = info->opts_offset; + + if (mss) { + opts[0] |= TD_LSO; + opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[offset] |= info->checksum.tcp; + else if (ip->protocol == IPPROTO_UDP) + opts[offset] |= info->checksum.udp; + else + WARN_ON_ONCE(1); + } + } + + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % 
NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { + netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); + opts[0] = DescOwn; + + rtl8169_tso_csum(tp, skb, opts); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + tp->cur_tx += frags + 1; + + wmb(); + + RTL_W8(TxPoll, NPQ); + + if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + netif_stop_queue(dev); + smp_rmb(); + if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + + err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); + err_dma_0: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + + err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; + } + + static void rtl8169_pcierr_interrupt(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { + void __iomem *ioaddr = tp->mmio_addr; + + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl8169_schedule_work(dev, rtl8169_reinit_task); + } + + static void rtl8169_tx_interrupt(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) + { + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + rmb(); + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += tx_skb->skb->len; + dev_kfree_skb(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + smp_wmb(); + if (netif_queue_stopped(dev) && + (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + smp_rmb(); + if (tp->cur_tx != dirty_tx) + RTL_W8(TxPoll, NPQ); + } + } + + static inline int rtl8169_fragmented_frame(u32 status) + { + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); + } + + static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) + { + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + } + + static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) + { + struct sk_buff *skb; + struct device *d = &tp->pci_dev->dev; + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; + } + + static int rtl8169_rx_interrupt(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, u32 budget) + { + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = min(rx_left, budget); + + for (; rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + rmb(); + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + + if (status & DescOwn) + break; + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + if (status & RxFOVF) { + rtl8169_schedule_work(dev, rtl8169_reset_task); + dev->stats.rx_fifo_errors++; + } + rtl8169_mark_to_asic(desc, rx_buf_sz); + } else { + struct sk_buff *skb; + dma_addr_t addr = le64_to_cpu(desc->addr); + int pkt_size = (status & 0x00003fff) - 4; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. + */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + rtl8169_mark_to_asic(desc, rx_buf_sz); + continue; + } + + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + rtl8169_mark_to_asic(desc, rx_buf_sz); + if (!skb) { + dev->stats.rx_dropped++; + continue; + } + + rtl8169_rx_csum(skb, status); + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + napi_gro_receive(&tp->napi, skb); + + dev->stats.rx_bytes += pkt_size; + dev->stats.rx_packets++; + } + + /* Work around for AMD platform. */ + if ((desc->opts2 & cpu_to_le32(0xfffe000)) && + (tp->mac_version == RTL_GIGA_MAC_VER_05)) { + desc->opts2 = 0; + cur_rx++; + } + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + tp->dirty_rx += count; + + return count; + } + + static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) + { + struct net_device *dev = dev_instance; + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int handled = 0; + int status; + + /* loop handling interrupts until we have no new ones or + * we hit an invalid/hotplug case.
+ */ + status = RTL_R16(IntrStatus); + while (status && status != 0xffff) { + handled = 1; + + /* Handle all of the error cases first. These will reset + * the chip, so just exit the loop. + */ + if (unlikely(!netif_running(dev))) { + rtl8169_hw_reset(tp); + break; + } + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_26: + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + goto done; + /* Testers needed. */ + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + /* Experimental science. Pktgen proof. */ + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_25: + if (status == RxFIFOOver) + goto done; + break; + default: + break; + } + } + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(dev); + break; + } + + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, ioaddr, true); + + /* We need to see the latest version of tp->intr_mask to + * avoid ignoring an MSI interrupt and having to wait for + * another event which may never come. + */ + smp_rmb(); + if (status & tp->intr_mask & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; + + if (likely(napi_schedule_prep(&tp->napi))) + __napi_schedule(&tp->napi); + else + netif_info(tp, intr, dev, + "interrupt %04x in poll\n", status); + } + + /* We only get a new MSI interrupt when all active irq + * sources on the chip have been acknowledged. So, ack + * everything we've seen and check if new sources have become + * active to avoid blocking all interrupts from the chip. + */ + RTL_W16(IntrStatus, + (status & RxFIFOOver) ? (status | RxOverflow) : status); + status = RTL_R16(IntrStatus); + } + done: + return IRQ_RETVAL(handled); + } + + static int rtl8169_poll(struct napi_struct *napi, int budget) + { + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->mmio_addr; + int work_done; + + work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); + rtl8169_tx_interrupt(dev, tp, ioaddr); + + if (work_done < budget) { + napi_complete(napi); + + /* We need to force the visibility of tp->intr_mask + * for other CPUs, as we can lose an MSI interrupt + * and potentially wait for a retransmit timeout if we don't. + * The posted write to IntrMask is safe, as it will + * eventually make it to the chip and we won't lose anything + * until it does.
+ */ + tp->intr_mask = 0xffff; + wmb(); + RTL_W16(IntrMask, tp->intr_event); + } + + return work_done; + } + + static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) + { + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) + return; + + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); + RTL_W32(RxMissed, 0); + } + + static void rtl8169_down(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + del_timer_sync(&tp->timer); + + netif_stop_queue(dev); + + napi_disable(&tp->napi); + + spin_lock_irq(&tp->lock); + + rtl8169_hw_reset(tp); + /* + * At this point device interrupts can not be enabled in any function, + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task, + * rtl8169_reinit_task) and napi is disabled (rtl8169_poll). + */ + rtl8169_rx_missed(dev, ioaddr); + + spin_unlock_irq(&tp->lock); + + synchronize_irq(dev->irq); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ + + rtl8169_tx_clear(tp); + + rtl8169_rx_clear(tp); + + rtl_pll_power_down(tp); + } + + static int rtl8169_close(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + /* Update counters before going down */ + rtl8169_update_counters(dev); + + rtl8169_down(dev); + + free_irq(dev->irq, dev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; + } + + static void rtl_set_rx_mode(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
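(An aside on the perfect-filter branch that follows, a sketch rather than patch code: each multicast address is reduced to a single bit of a 64-bit hash filter, the top six bits of its CRC-32 selecting one of the two 32-bit filter words, written to MAR0/MAR4 below, and the bit within that word. As with any hash filter, different addresses may collide onto the same bit, so matching is approximate. The helper below is hypothetical and only restates that mapping.)

    static void mc_hash_set(u32 mc_filter[2], const u8 *addr)
    {
            int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;   /* 0..63 */

            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);   /* word, then bit */
    }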
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + spin_lock_irqsave(&tp->lock, flags); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); + + spin_unlock_irqrestore(&tp->lock, flags); + } + + /** + * rtl8169_get_stats - Get rtl8169 read/write statistics + * @dev: The Ethernet Device to get statistics for + * + * Get TX/RX statistics for rtl8169 + */ + static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + if (netif_running(dev)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8169_rx_missed(dev, ioaddr); + spin_unlock_irqrestore(&tp->lock, flags); + } + + return &dev->stats; + } + + static void rtl8169_net_suspend(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + rtl_pll_power_down(tp); + + netif_device_detach(dev); + netif_stop_queue(dev); + } + + #ifdef CONFIG_PM + + static int rtl8169_suspend(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + rtl8169_net_suspend(dev); + + return 0; + } + + static void __rtl8169_resume(struct net_device *dev) + { + struct rtl8169_private *tp = netdev_priv(dev); + + netif_device_attach(dev); + + rtl_pll_power_up(tp); + + rtl8169_schedule_work(dev, rtl8169_reset_task); + } + + static int rtl8169_resume(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); + + if (netif_running(dev)) + __rtl8169_resume(dev); + + return 0; + } + + static int rtl8169_runtime_suspend(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + spin_unlock_irq(&tp->lock); + + rtl8169_net_suspend(dev); + + return 0; + } + + static int rtl8169_runtime_resume(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + spin_unlock_irq(&tp->lock); + + rtl8169_init_phy(dev, tp); + + __rtl8169_resume(dev); + + return 0; + } + + static int rtl8169_runtime_idle(struct device *device) + { + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->TxDescArray ? 
-EBUSY : 0; + } + + static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, + }; + + #define RTL8169_PM_OPS (&rtl8169_pm_ops) + + #else /* !CONFIG_PM */ + + #define RTL8169_PM_OPS NULL + + #endif /* !CONFIG_PM */ + ++static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) ++{ ++ void __iomem *ioaddr = tp->mmio_addr; ++ ++ /* WoL fails with 8168b when the receiver is disabled. */ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_11: ++ case RTL_GIGA_MAC_VER_12: ++ case RTL_GIGA_MAC_VER_17: ++ pci_clear_master(tp->pci_dev); ++ ++ RTL_W8(ChipCmd, CmdRxEnb); ++ /* PCI commit */ ++ RTL_R8(ChipCmd); ++ break; ++ default: ++ break; ++ } ++} ++ + static void rtl_shutdown(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + spin_lock_irq(&tp->lock); + + rtl8169_hw_reset(tp); + + spin_unlock_irq(&tp->lock); + + if (system_state == SYSTEM_POWER_OFF) { - /* WoL fails with 8168b when the receiver is disabled. */ - if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_12 || - tp->mac_version == RTL_GIGA_MAC_VER_17) && - (tp->features & RTL_FEATURE_WOL)) { - pci_clear_master(pdev); - - RTL_W8(ChipCmd, CmdRxEnb); - /* PCI commit */ - RTL_R8(ChipCmd); ++ if (__rtl8169_get_wol(tp) & WAKE_ANY) { ++ rtl_wol_suspend_quirk(tp); ++ rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } + } + + static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl8169_init_one, + .remove = __devexit_p(rtl8169_remove_one), + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, + }; + + static int __init rtl8169_init_module(void) + { + return pci_register_driver(&rtl8169_pci_driver); + } + + static void __exit rtl8169_cleanup_module(void) + { + pci_unregister_driver(&rtl8169_pci_driver); + } + + module_init(rtl8169_init_module); + module_exit(rtl8169_cleanup_module); diff --combined drivers/net/ethernet/sfc/efx.c index 0000000,de9afeb..d5731f1 mode 000000,100644..100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@@ -1,0 -1,2732 +1,2734 @@@ + /**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + + #include <linux/module.h> + #include <linux/pci.h> + #include <linux/netdevice.h> + #include <linux/etherdevice.h> + #include <linux/delay.h> + #include <linux/notifier.h> + #include <linux/ip.h> + #include <linux/tcp.h> + #include <linux/in.h> + #include <linux/crc32.h> + #include <linux/ethtool.h> + #include <linux/topology.h> + #include <linux/gfp.h> + #include <linux/cpu_rmap.h> + #include "net_driver.h" + #include "efx.h" + #include "nic.h" + + #include "mcdi.h" + #include "workarounds.h" + + /************************************************************************** + * + * Type name strings + * + ************************************************************************** + */ + + /* Loopback mode names (see LOOPBACK_MODE()) */ + const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; + const char *efx_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_DATA] = "DATAPATH", + [LOOPBACK_GMAC] = "GMAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_GMII] = "GMII", + [LOOPBACK_SGMII] = "SGMII", + [LOOPBACK_XGBR] = "XGBR", + [LOOPBACK_XFI] = "XFI", + [LOOPBACK_XAUI_FAR] = "XAUI_FAR", + [LOOPBACK_GMII_FAR] = "GMII_FAR", + [LOOPBACK_SGMII_FAR] = "SGMII_FAR", + [LOOPBACK_XFI_FAR] = "XFI_FAR", + [LOOPBACK_GPHY] = "GPHY", + [LOOPBACK_PHYXS] = "PHYXS", + [LOOPBACK_PCS] = "PCS", + [LOOPBACK_PMAPMD] = "PMA/PMD", + [LOOPBACK_XPORT] = "XPORT", + [LOOPBACK_XGMII_WS] = "XGMII_WS", + [LOOPBACK_XAUI_WS] = "XAUI_WS", + [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", + [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", + [LOOPBACK_GMII_WS] = "GMII_WS", + [LOOPBACK_XFI_WS] = "XFI_WS", + [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", + [LOOPBACK_PHYXS_WS] = "PHYXS_WS", + }; + + const unsigned int efx_reset_type_max = RESET_TYPE_MAX; + const char *efx_reset_type_names[] = { + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", + [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", + [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + }; + + #define EFX_MAX_MTU (9 * 1024) + + /* Reset workqueue. If any NIC has a hardware failure then a reset will be + * queued onto this work queue. This is not a per-nic work queue, because + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. + */ + static struct workqueue_struct *reset_workqueue; + + /************************************************************************** + * + * Configurable values + * + *************************************************************************/ + + /* + * Use separate channels for TX and RX events + * + * Set this to 1 to use separate channels for TX and RX. It allows us + * to control interrupt affinity separately for TX and RX. + * + * This is only used in MSI-X interrupt mode + */ + static unsigned int separate_tx_channels; + module_param(separate_tx_channels, uint, 0444); + MODULE_PARM_DESC(separate_tx_channels, + "Use separate channels for TX and RX"); + + /* This is the weight assigned to each of the (per-channel) virtual + * NAPI devices. + */ + static int napi_weight = 64; + + /* This is the time (in jiffies) between invocations of the hardware + * monitor. 
On Falcon-based NICs, this will: + * - Check the on-board hardware monitor; + * - Poll the link state and reconfigure the hardware as necessary. + */ + static unsigned int efx_monitor_interval = 1 * HZ; + + /* This controls whether or not the driver will initialise devices + * with invalid MAC addresses stored in the EEPROM or flash. If true, + * such devices will be initialised with a random locally-generated + * MAC address. This allows for loading the sfc_mtd driver to + * reprogram the flash, even if the flash contents (including the MAC + * address) have previously been erased. + */ + static unsigned int allow_bad_hwaddr; + + /* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * The default for RX should strike a balance between increasing the + * round-trip latency and reducing overhead. + */ + static unsigned int rx_irq_mod_usec = 60; + + /* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * This default is chosen to ensure that a 10G link does not go idle + * while a TX queue is stopped after it has become full. A queue is + * restarted when it drops below half full. The time this takes (assuming + * worst case 3 descriptors per packet and 1024 descriptors) is + * 512 / 3 * 1.2 = 205 usec. + */ + static unsigned int tx_irq_mod_usec = 150; + + /* This is the first interrupt mode to try out of: + * 0 => MSI-X + * 1 => MSI + * 2 => legacy + */ + static unsigned int interrupt_mode; + + /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), + * i.e. the number of CPUs among which we may distribute simultaneous + * interrupt handling. + * + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. + * The default (0) means to assign an interrupt to each package (level II cache) + */ + static unsigned int rss_cpus; + module_param(rss_cpus, uint, 0444); + MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + + static int phy_flash_cfg; + module_param(phy_flash_cfg, int, 0644); + MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); + + static unsigned irq_adapt_low_thresh = 10000; + module_param(irq_adapt_low_thresh, uint, 0644); + MODULE_PARM_DESC(irq_adapt_low_thresh, + "Threshold score for reducing IRQ moderation"); + + static unsigned irq_adapt_high_thresh = 20000; + module_param(irq_adapt_high_thresh, uint, 0644); + MODULE_PARM_DESC(irq_adapt_high_thresh, + "Threshold score for increasing IRQ moderation"); + + static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); + module_param(debug, uint, 0); + MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + + /************************************************************************** + * + * Utility functions and prototypes + * + *************************************************************************/ + + static void efx_remove_channels(struct efx_nic *efx); + static void efx_remove_port(struct efx_nic *efx); + static void efx_init_napi(struct efx_nic *efx); + static void efx_fini_napi(struct efx_nic *efx); + static void efx_fini_napi_channel(struct efx_channel *channel); + static void efx_fini_struct(struct efx_nic *efx); + static void efx_start_all(struct efx_nic *efx); + static void efx_stop_all(struct efx_nic *efx); + + #define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_RUNNING) || \ + 
(efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + + /************************************************************************** + * + * Event queue processing + * + *************************************************************************/ + + /* Process channel's event queue + * + * This function is responsible for processing the event queue of a + * single channel. The caller must guarantee that this function will + * never be concurrently called more than once on the same channel, + * though different channels may be being processed concurrently. + */ + static int efx_process_channel(struct efx_channel *channel, int budget) + { + struct efx_nic *efx = channel->efx; + int spent; + + if (unlikely(efx->reset_pending || !channel->enabled)) + return 0; + + spent = efx_nic_process_eventq(channel, budget); + if (spent == 0) + return 0; + + /* Deliver last RX packet. */ + if (channel->rx_pkt) { + __efx_rx_packet(channel, channel->rx_pkt, + channel->rx_pkt_csummed); + channel->rx_pkt = NULL; + } + + efx_rx_strategy(channel); + + efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); + + return spent; + } + + /* Mark channel as finished processing + * + * Note that since we will not receive further interrupts for this + * channel before we finish processing and call the eventq_read_ack() + * method, there is no need to use the interrupt hold-off timers. + */ + static inline void efx_channel_processed(struct efx_channel *channel) + { + /* The interrupt handler for this channel may set work_pending + * as soon as we acknowledge the events we've seen. Make sure + * it's cleared before then. */ + channel->work_pending = false; + smp_wmb(); + + efx_nic_eventq_read_ack(channel); + } + + /* NAPI poll handler + * + * NAPI guarantees serialisation of polls of the same device, which + * provides the guarantee required by efx_process_channel(). + */ + static int efx_poll(struct napi_struct *napi, int budget) + { + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; + int spent; + + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); + + spent = efx_process_channel(channel, budget); + + if (spent < budget) { + if (channel->channel < efx->n_rx_channels && + efx->irq_rx_adaptive && + unlikely(++channel->irq_count == 1000)) { + if (unlikely(channel->irq_mod_score < + irq_adapt_low_thresh)) { + if (channel->irq_moderation > 1) { + channel->irq_moderation -= 1; + efx->type->push_irq_moderation(channel); + } + } else if (unlikely(channel->irq_mod_score > + irq_adapt_high_thresh)) { + if (channel->irq_moderation < + efx->irq_rx_moderation) { + channel->irq_moderation += 1; + efx->type->push_irq_moderation(channel); + } + } + channel->irq_count = 0; + channel->irq_mod_score = 0; + } + + efx_filter_rfs_expire(channel); + + /* There is no race here; although napi_disable() will + * only wait for napi_complete(), this isn't a problem + * since efx_channel_processed() will have no effect if + * interrupts have already been disabled. + */ + napi_complete(napi); + efx_channel_processed(channel); + } + + return spent; + } + + /* Process the eventq of the specified channel immediately on this CPU + * + * Disable hardware generated interrupts, wait for any existing + * processing to finish, then directly poll (and ack ) the eventq. + * Finally reenable NAPI and interrupts. + * + * This is for use only during a loopback self-test. 
It must not + * deliver any packets up the stack as this can result in deadlock. + */ + void efx_process_channel_now(struct efx_channel *channel) + { + struct efx_nic *efx = channel->efx; + + BUG_ON(channel->channel >= efx->n_channels); + BUG_ON(!channel->enabled); + BUG_ON(!efx->loopback_selftest); + + /* Disable interrupts and wait for ISRs to complete */ + efx_nic_disable_interrupts(efx); + if (efx->legacy_irq) { + synchronize_irq(efx->legacy_irq); + efx->legacy_irq_enabled = false; + } + if (channel->irq) + synchronize_irq(channel->irq); + + /* Wait for any NAPI processing to complete */ + napi_disable(&channel->napi_str); + + /* Poll the channel */ + efx_process_channel(channel, channel->eventq_mask + 1); + + /* Ack the eventq. This may cause an interrupt to be generated + * when they are reenabled */ + efx_channel_processed(channel); + + napi_enable(&channel->napi_str); + if (efx->legacy_irq) + efx->legacy_irq_enabled = true; + efx_nic_enable_interrupts(efx); + } + + /* Create event queue + * Event queue memory allocations are done only once. If the channel + * is reset, the memory buffer will be reused; this guards against + * errors during channel reset and also simplifies interrupt handling. + */ + static int efx_probe_eventq(struct efx_channel *channel) + { + struct efx_nic *efx = channel->efx; + unsigned long entries; + + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "chan %d create event queue\n", channel->channel); + + /* Build an event queue with room for one event per tx and rx buffer, + * plus some extra for link state events and MCDI completions. */ + entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); + EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); + channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; + + return efx_nic_probe_eventq(channel); + } + + /* Prepare channel's event queue */ + static void efx_init_eventq(struct efx_channel *channel) + { + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d init event queue\n", channel->channel); + + channel->eventq_read_ptr = 0; + + efx_nic_init_eventq(channel); + } + + static void efx_fini_eventq(struct efx_channel *channel) + { + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); + + efx_nic_fini_eventq(channel); + } + + static void efx_remove_eventq(struct efx_channel *channel) + { + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); + + efx_nic_remove_eventq(channel); + } + + /************************************************************************** + * + * Channel handling + * + *************************************************************************/ + + /* Allocate and initialise a channel structure, optionally copying + * parameters (but not resources) from an old channel structure. 
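(A worked aside on the event-queue sizing in efx_probe_eventq() above, using hypothetical ring sizes since the defaults are not visible in this hunk:)

    unsigned long entries = roundup_pow_of_two(1024 + 1024 + 128);
    /* 1024 + 1024 + 128 = 2176, rounded up to the next power of two: 4096,
     * so eventq_mask would be 4096 - 1 = 4095, provided EFX_MIN_EVQ_SIZE
     * does not exceed 4096 (its value is not shown here). */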
*/ + static struct efx_channel * + efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) + { + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int j; + + if (old_channel) { + channel = kmalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + *channel = *old_channel; + + channel->napi_dev = NULL; + memset(&channel->eventq, 0, sizeof(channel->eventq)); + + rx_queue = &channel->rx_queue; + rx_queue->buffer = NULL; + memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); + + for (j = 0; j < EFX_TXQ_TYPES; j++) { + tx_queue = &channel->tx_queue[j]; + if (tx_queue->channel) + tx_queue->channel = channel; + tx_queue->buffer = NULL; + memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); + } + } else { + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->efx = efx; + channel->channel = i; + + for (j = 0; j < EFX_TXQ_TYPES; j++) { + tx_queue = &channel->tx_queue[j]; + tx_queue->efx = efx; + tx_queue->queue = i * EFX_TXQ_TYPES + j; + tx_queue->channel = channel; + } + } + + rx_queue = &channel->rx_queue; + rx_queue->efx = efx; + setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, + (unsigned long)rx_queue); + + return channel; + } + + static int efx_probe_channel(struct efx_channel *channel) + { + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "creating channel %d\n", channel->channel); + + rc = efx_probe_eventq(channel); + if (rc) + goto fail1; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + goto fail2; + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_probe_rx_queue(rx_queue); + if (rc) + goto fail3; + } + + channel->n_rx_frm_trunc = 0; + + return 0; + + fail3: + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_remove_rx_queue(rx_queue); + fail2: + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_remove_tx_queue(tx_queue); + fail1: + return rc; + } + + + static void efx_set_channel_names(struct efx_nic *efx) + { + struct efx_channel *channel; + const char *type = ""; + int number; + + efx_for_each_channel(channel, efx) { + number = channel->channel; + if (efx->n_channels > efx->n_rx_channels) { + if (channel->channel < efx->n_rx_channels) { + type = "-rx"; + } else { + type = "-tx"; + number -= efx->n_rx_channels; + } + } + snprintf(efx->channel_name[channel->channel], + sizeof(efx->channel_name[0]), + "%s%s-%d", efx->name, type, number); + } + } + + static int efx_probe_channels(struct efx_nic *efx) + { + struct efx_channel *channel; + int rc; + + /* Restart special buffer allocation */ + efx->next_buffer_table = 0; + + efx_for_each_channel(channel, efx) { + rc = efx_probe_channel(channel); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); + goto fail; + } + } + efx_set_channel_names(efx); + + return 0; + + fail: + efx_remove_channels(efx); + return rc; + } + + /* Channels are shutdown and reinitialised whilst the NIC is running + * to propagate configuration changes (mtu, checksum offload), or + * to clear hardware error conditions + */ + static void efx_init_channels(struct efx_nic *efx) + { + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. 
+ */ + efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_hash_size + + efx->type->rx_buffer_padding); + efx->rx_buffer_order = get_order(efx->rx_buffer_len + + sizeof(struct efx_rx_page_state)); + + /* Initialise the channels */ + efx_for_each_channel(channel, efx) { + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "init chan %d\n", channel->channel); + + efx_init_eventq(channel); + + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_init_tx_queue(tx_queue); + + /* The rx buffer allocation strategy is MTU dependent */ + efx_rx_strategy(channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_init_rx_queue(rx_queue); + + WARN_ON(channel->rx_pkt != NULL); + efx_rx_strategy(channel); + } + } + + /* This enables event queue processing and packet transmission. + * + * Note that this function is not allowed to fail, since that would + * introduce too much complexity into the suspend/resume path. + */ + static void efx_start_channel(struct efx_channel *channel) + { + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "starting chan %d\n", channel->channel); + + /* The interrupt handler for this channel may set work_pending + * as soon as we enable it. Make sure it's cleared before + * then. Similarly, make sure it sees the enabled flag set. */ + channel->work_pending = false; + channel->enabled = true; + smp_wmb(); + + /* Fill the queues before enabling NAPI */ + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fast_push_rx_descriptors(rx_queue); + + napi_enable(&channel->napi_str); + } + + /* This disables event queue processing and packet transmission. + * This function does not guarantee that all queue processing + * (e.g. RX refill) is complete. + */ + static void efx_stop_channel(struct efx_channel *channel) + { + if (!channel->enabled) + return; + + netif_dbg(channel->efx, ifdown, channel->efx->net_dev, + "stop chan %d\n", channel->channel); + + channel->enabled = false; + napi_disable(&channel->napi_str); + } + + static void efx_fini_channels(struct efx_nic *efx) + { + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->port_enabled); + + rc = efx_nic_flush_queues(efx); + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
*/ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + + efx_for_each_channel(channel, efx) { + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "shut down chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_rx_queue(rx_queue); + efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_fini_tx_queue(tx_queue); + efx_fini_eventq(channel); + } + } + + static void efx_remove_channel(struct efx_channel *channel) + { + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_remove_rx_queue(rx_queue); + efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_remove_tx_queue(tx_queue); + efx_remove_eventq(channel); + } + + static void efx_remove_channels(struct efx_nic *efx) + { + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_remove_channel(channel); + } + + int + efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) + { + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + u32 old_rxq_entries, old_txq_entries; + unsigned i; + int rc; + + efx_stop_all(efx); + efx_fini_channels(efx); + + /* Clone channels */ + memset(other_channel, 0, sizeof(other_channel)); + for (i = 0; i < efx->n_channels; i++) { + channel = efx_alloc_channel(efx, i, efx->channel[i]); + if (!channel) { + rc = -ENOMEM; + goto out; + } + other_channel[i] = channel; + } + + /* Swap entry counts and channel pointers */ + old_rxq_entries = efx->rxq_entries; + old_txq_entries = efx->txq_entries; + efx->rxq_entries = rxq_entries; + efx->txq_entries = txq_entries; + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + efx->channel[i] = other_channel[i]; + other_channel[i] = channel; + } + + rc = efx_probe_channels(efx); + if (rc) + goto rollback; + + efx_init_napi(efx); + + /* Destroy old channels */ + for (i = 0; i < efx->n_channels; i++) { + efx_fini_napi_channel(other_channel[i]); + efx_remove_channel(other_channel[i]); + } + out: + /* Free unused channel structures */ + for (i = 0; i < efx->n_channels; i++) + kfree(other_channel[i]); + + efx_init_channels(efx); + efx_start_all(efx); + return rc; + + rollback: + /* Swap back */ + efx->rxq_entries = old_rxq_entries; + efx->txq_entries = old_txq_entries; + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + efx->channel[i] = other_channel[i]; + other_channel[i] = channel; + } + goto out; + } + + void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) + { + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); + } + + /************************************************************************** + * + * Port handling + * + **************************************************************************/ + + /* This ensures that the kernel is kept informed (via + * netif_carrier_on/off) of the link status, and also maintains the + * link status's stop on the port's TX queue. 
+ */ + void efx_link_status_changed(struct efx_nic *efx) + { + struct efx_link_state *link_state = &efx->link_state; + + /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure + * that no events are triggered between unregister_netdev() and the + * driver unloading. A more general condition is that NETDEV_CHANGE + * can only be generated between NETDEV_UP and NETDEV_DOWN */ + if (!netif_running(efx->net_dev)) + return; + + if (link_state->up != netif_carrier_ok(efx->net_dev)) { + efx->n_link_state_changes++; + + if (link_state->up) + netif_carrier_on(efx->net_dev); + else + netif_carrier_off(efx->net_dev); + } + + /* Status message for kernel log */ + if (link_state->up) { + netif_info(efx, link, efx->net_dev, + "link up at %uMbps %s-duplex (MTU %d)%s\n", + link_state->speed, link_state->fd ? "full" : "half", + efx->net_dev->mtu, + (efx->promiscuous ? " [PROMISC]" : "")); + } else { + netif_info(efx, link, efx->net_dev, "link down\n"); + } + + } + + void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) + { + efx->link_advertising = advertising; + if (advertising) { + if (advertising & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; + } + } + + void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) + { + efx->wanted_fc = wanted_fc; + if (efx->link_advertising) { + if (wanted_fc & EFX_FC_RX) + efx->link_advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising ^= ADVERTISED_Asym_Pause; + } + } + + static void efx_fini_port(struct efx_nic *efx); + + /* Push loopback/power/transmit disable settings to the PHY, and reconfigure + * the MAC appropriately. All other PHY configuration changes are pushed + * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through efx_monitor(). + * + * Callers must hold the mac_lock + */ + int __efx_reconfigure_port(struct efx_nic *efx) + { + enum efx_phy_mode phy_mode; + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Serialise the promiscuous flag with efx_set_multicast_list. */ + if (efx_dev_registered(efx)) { + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + } + + /* Disable PHY transmit in mac level loopbacks */ + phy_mode = efx->phy_mode; + if (LOOPBACK_INTERNAL(efx)) + efx->phy_mode |= PHY_MODE_TX_DISABLED; + else + efx->phy_mode &= ~PHY_MODE_TX_DISABLED; + + rc = efx->type->reconfigure_port(efx); + + if (rc) + efx->phy_mode = phy_mode; + + return rc; + } + + /* Reinitialise the MAC to pick up new PHY settings, even if the port is + * disabled. */ + int efx_reconfigure_port(struct efx_nic *efx) + { + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + return rc; + } + + /* Asynchronous work item for changing MAC promiscuity and multicast + * hash. Avoid a drain/rx_ingress enable by reconfiguring the current + * MAC directly. 
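A small standalone table of the pause-advertising logic in efx_link_set_wanted_fc() above, using the same bit manipulation on stand-in constants (the EFX_FC_* and ADVERTISED_* values here are placeholders for the example; only the relative logic matters). This is a sketch, not part of the patch:

#include <stdio.h>

#define EFX_FC_RX               0x1u    /* placeholder values for the example */
#define EFX_FC_TX               0x2u
#define ADVERTISED_Pause        0x1u
#define ADVERTISED_Asym_Pause   0x2u

/* Same logic as efx_link_set_wanted_fc(): RX flow control selects symmetric
 * pause advertising; the TX bit then toggles Asym_Pause to pick the
 * asymmetric variants. */
static unsigned int advertising_for(unsigned int wanted_fc)
{
        unsigned int adv = 0;

        if (wanted_fc & EFX_FC_RX)
                adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        if (wanted_fc & EFX_FC_TX)
                adv ^= ADVERTISED_Asym_Pause;
        return adv;
}

int main(void)
{
        unsigned int fc;

        for (fc = 0; fc <= (EFX_FC_RX | EFX_FC_TX); fc++)
                printf("RX=%u TX=%u -> Pause=%u Asym_Pause=%u\n",
                       !!(fc & EFX_FC_RX), !!(fc & EFX_FC_TX),
                       !!(advertising_for(fc) & ADVERTISED_Pause),
                       !!(advertising_for(fc) & ADVERTISED_Asym_Pause));
        return 0;
}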
*/ + static void efx_mac_work(struct work_struct *data) + { + struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); + + mutex_lock(&efx->mac_lock); + if (efx->port_enabled) { + efx->type->push_multicast_hash(efx); + efx->mac_op->reconfigure(efx); + } + mutex_unlock(&efx->mac_lock); + } + + static int efx_probe_port(struct efx_nic *efx) + { + unsigned char *perm_addr; + int rc; + + netif_dbg(efx, probe, efx->net_dev, "create port\n"); + + if (phy_flash_cfg) + efx->phy_mode = PHY_MODE_SPECIAL; + + /* Connect up MAC/PHY operations table */ + rc = efx->type->probe_port(efx); + if (rc) + return rc; + + /* Sanity check MAC address */ + perm_addr = efx->net_dev->perm_addr; + if (is_valid_ether_addr(perm_addr)) { + memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN); + } else { + netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", + perm_addr); + if (!allow_bad_hwaddr) { + rc = -EINVAL; + goto err; + } + random_ether_addr(efx->net_dev->dev_addr); + netif_info(efx, probe, efx->net_dev, + "using locally-generated MAC %pM\n", + efx->net_dev->dev_addr); + } + + return 0; + + err: + efx->type->remove_port(efx); + return rc; + } + + static int efx_init_port(struct efx_nic *efx) + { + int rc; + + netif_dbg(efx, drv, efx->net_dev, "init port\n"); + + mutex_lock(&efx->mac_lock); + + rc = efx->phy_op->init(efx); + if (rc) + goto fail1; + + efx->port_initialized = true; + + /* Reconfigure the MAC before creating dma queues (required for + * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ + efx->mac_op->reconfigure(efx); + + /* Ensure the PHY advertises the correct flow control settings */ + rc = efx->phy_op->reconfigure(efx); + if (rc) + goto fail2; + + mutex_unlock(&efx->mac_lock); + return 0; + + fail2: + efx->phy_op->fini(efx); + fail1: + mutex_unlock(&efx->mac_lock); + return rc; + } + + static void efx_start_port(struct efx_nic *efx) + { + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); + BUG_ON(efx->port_enabled); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = true; + + /* efx_mac_work() might have been scheduled after efx_stop_port(), + * and then cancelled by efx_flush_all() */ + efx->type->push_multicast_hash(efx); + efx->mac_op->reconfigure(efx); + + mutex_unlock(&efx->mac_lock); + } + + /* Prevent efx_mac_work() and efx_monitor() from working */ + static void efx_stop_port(struct efx_nic *efx) + { + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = false; + mutex_unlock(&efx->mac_lock); + + /* Serialise against efx_set_multicast_list() */ + if (efx_dev_registered(efx)) { + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + } + } + + static void efx_fini_port(struct efx_nic *efx) + { + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); + + if (!efx->port_initialized) + return; + + efx->phy_op->fini(efx); + efx->port_initialized = false; + + efx->link_state.up = false; + efx_link_status_changed(efx); + } + + static void efx_remove_port(struct efx_nic *efx) + { + netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); + + efx->type->remove_port(efx); + } + + /************************************************************************** + * + * NIC handling + * + **************************************************************************/ + + /* This configures the PCI device to enable I/O and DMA. 
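Before the function itself, a hedged sketch of the DMA-mask negotiation it performs: start from the NIC's widest mask and halve it until the platform accepts one. The 46-bit starting mask and the accepts_mask() stub are assumptions standing in for the device's max_dma_mask and for pci_dma_supported()/pci_set_dma_mask():

#include <stdio.h>
#include <stdint.h>

/* Pretend the platform (e.g. an IOMMU configuration) only accepts masks of
 * 40 bits or fewer; stands in for pci_dma_supported()/pci_set_dma_mask(). */
static int accepts_mask(uint64_t mask)
{
        return mask <= ((1ULL << 40) - 1);
}

int main(void)
{
        uint64_t dma_mask = (1ULL << 46) - 1;   /* assumed NIC maximum */
        int ok = 0;

        /* Walk down from the widest mask to 32 bits, as efx_init_io() does */
        while (dma_mask > 0x7fffffffULL) {
                if (accepts_mask(dma_mask)) {
                        ok = 1;
                        break;
                }
                dma_mask >>= 1;
        }

        if (ok)
                printf("using DMA mask %llx\n", (unsigned long long)dma_mask);
        else
                printf("could not find a suitable DMA mask\n");
        return 0;
}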
*/ + static int efx_init_io(struct efx_nic *efx) + { + struct pci_dev *pci_dev = efx->pci_dev; + dma_addr_t dma_mask = efx->type->max_dma_mask; + int rc; + + netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); + + rc = pci_enable_device(pci_dev); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to enable PCI device\n"); + goto fail1; + } + + pci_set_master(pci_dev); + + /* Set the PCI DMA mask. Try all possibilities from our + * genuine mask down to 32 bits, because some architectures + * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit + * masks event though they reject 46 bit masks. + */ + while (dma_mask > 0x7fffffffUL) { + if (pci_dma_supported(pci_dev, dma_mask) && + ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) + break; + dma_mask >>= 1; + } + if (rc) { + netif_err(efx, probe, efx->net_dev, + "could not find a suitable DMA mask\n"); + goto fail2; + } + netif_dbg(efx, probe, efx->net_dev, + "using DMA mask %llx\n", (unsigned long long) dma_mask); + rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); + if (rc) { + /* pci_set_consistent_dma_mask() is not *allowed* to + * fail with a mask that pci_set_dma_mask() accepted, + * but just in case... + */ + netif_err(efx, probe, efx->net_dev, + "failed to set consistent DMA mask\n"); + goto fail2; + } + + efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); + rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "request for memory BAR failed\n"); + rc = -EIO; + goto fail3; + } + efx->membase = ioremap_nocache(efx->membase_phys, + efx->type->mem_map_size); + if (!efx->membase) { + netif_err(efx, probe, efx->net_dev, + "could not map memory BAR at %llx+%x\n", + (unsigned long long)efx->membase_phys, + efx->type->mem_map_size); + rc = -ENOMEM; + goto fail4; + } + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %llx+%x (virtual %p)\n", + (unsigned long long)efx->membase_phys, + efx->type->mem_map_size, efx->membase); + + return 0; + + fail4: + pci_release_region(efx->pci_dev, EFX_MEM_BAR); + fail3: + efx->membase_phys = 0; + fail2: + pci_disable_device(efx->pci_dev); + fail1: + return rc; + } + + static void efx_fini_io(struct efx_nic *efx) + { + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); + + if (efx->membase) { + iounmap(efx->membase); + efx->membase = NULL; + } + + if (efx->membase_phys) { + pci_release_region(efx->pci_dev, EFX_MEM_BAR); + efx->membase_phys = 0; + } + + pci_disable_device(efx->pci_dev); + } + + /* Get number of channels wanted. Each channel will have its own IRQ, + * 1 RX queue and/or 2 TX queues. 
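A hedged userspace analogue of the counting loop in efx_wanted_channels() below: walk the online CPUs and count one channel per group of CPUs sharing a topology mask. The group_of[] table is an invented stand-in for topology_core_cpumask(); it is illustration only, not part of the patch:

#include <stdio.h>

int main(void)
{
        /* Pretend 8 online CPUs, grouped in pairs by the topology */
        int group_of[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        unsigned int counted = 0;       /* bitmap of groups already seen */
        int count = 0, cpu;

        for (cpu = 0; cpu < 8; cpu++) {
                if (!(counted & (1u << group_of[cpu]))) {
                        count++;
                        counted |= 1u << group_of[cpu];
                }
        }

        printf("wanted channels: %d\n", count); /* 4 with this topology */
        return 0;
}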
*/ + static int efx_wanted_channels(void) + { + cpumask_var_t core_mask; + int count; + int cpu; + + if (rss_cpus) + return rss_cpus; + + if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { + printk(KERN_WARNING + "sfc: RSS disabled due to allocation failure\n"); + return 1; + } + + count = 0; + for_each_online_cpu(cpu) { + if (!cpumask_test_cpu(cpu, core_mask)) { + ++count; + cpumask_or(core_mask, core_mask, + topology_core_cpumask(cpu)); + } + } + + free_cpumask_var(core_mask); + return count; + } + + static int + efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) + { + #ifdef CONFIG_RFS_ACCEL + int i, rc; + + efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) + return -ENOMEM; + for (i = 0; i < efx->n_rx_channels; i++) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + xentries[i].vector); + if (rc) { + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; + return rc; + } + } + #endif + return 0; + } + + /* Probe the number and type of interrupts we are able to obtain, and + * the resulting numbers of channels and RX queues. + */ + static int efx_probe_interrupts(struct efx_nic *efx) + { + int max_channels = + min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); + int rc, i; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + struct msix_entry xentries[EFX_MAX_CHANNELS]; + int n_channels; + + n_channels = efx_wanted_channels(); + if (separate_tx_channels) + n_channels *= 2; + n_channels = min(n_channels, max_channels); + + for (i = 0; i < n_channels; i++) + xentries[i].entry = i; + rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); + if (rc > 0) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors" + " available (%d < %d).\n", rc, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + EFX_BUG_ON_PARANOID(rc >= n_channels); + n_channels = rc; + rc = pci_enable_msix(efx->pci_dev, xentries, + n_channels); + } + + if (rc == 0) { + efx->n_channels = n_channels; + if (separate_tx_channels) { + efx->n_tx_channels = + max(efx->n_channels / 2, 1U); + efx->n_rx_channels = + max(efx->n_channels - + efx->n_tx_channels, 1U); + } else { + efx->n_tx_channels = efx->n_channels; + efx->n_rx_channels = efx->n_channels; + } + rc = efx_init_rx_cpu_rmap(efx, xentries); + if (rc) { + pci_disable_msix(efx->pci_dev); + return rc; + } + for (i = 0; i < n_channels; i++) + efx_get_channel(efx, i)->irq = + xentries[i].vector; + } else { + /* Fall back to single channel MSI */ + efx->interrupt_mode = EFX_INT_MODE_MSI; + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + } + } + + /* Try single interrupt MSI */ + if (efx->interrupt_mode == EFX_INT_MODE_MSI) { + efx->n_channels = 1; + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; + } else { + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + } + } + + /* Assume legacy interrupts */ + if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { + efx->n_channels = 1 + (separate_tx_channels ? 
1 : 0); + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->legacy_irq = efx->pci_dev->irq; + } + + return 0; + } + + static void efx_remove_interrupts(struct efx_nic *efx) + { + struct efx_channel *channel; + + /* Remove MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + channel->irq = 0; + pci_disable_msi(efx->pci_dev); + pci_disable_msix(efx->pci_dev); + + /* Remove legacy interrupt */ + efx->legacy_irq = 0; + } + + static void efx_set_channels(struct efx_nic *efx) + { + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx->tx_channel_offset = + separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; + + /* We need to adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. + */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->queue -= (efx->tx_channel_offset * + EFX_TXQ_TYPES); + } + } + + static int efx_probe_nic(struct efx_nic *efx) + { + size_t i; + int rc; + + netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); + + /* Carry out hardware-type specific initialisation */ + rc = efx->type->probe(efx); + if (rc) + return rc; + + /* Determine the number of channels and queues by trying to hook + * in MSI-X interrupts. */ + rc = efx_probe_interrupts(efx); + if (rc) + goto fail; + + if (efx->n_channels > 1) + get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = i % efx->n_rx_channels; + + efx_set_channels(efx); + netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); + netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); + + /* Initialise the interrupt moderation settings */ + efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, + true); + + return 0; + + fail: + efx->type->remove(efx); + return rc; + } + + static void efx_remove_nic(struct efx_nic *efx) + { + netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); + + efx_remove_interrupts(efx); + efx->type->remove(efx); + } + + /************************************************************************** + * + * NIC startup/shutdown + * + *************************************************************************/ + + static int efx_probe_all(struct efx_nic *efx) + { + int rc; + + rc = efx_probe_nic(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); + goto fail1; + } + + rc = efx_probe_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create port\n"); + goto fail2; + } + + efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; + rc = efx_probe_channels(efx); + if (rc) + goto fail3; + + rc = efx_probe_filters(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create filter tables\n"); + goto fail4; + } + + return 0; + + fail4: + efx_remove_channels(efx); + fail3: + efx_remove_port(efx); + fail2: + efx_remove_nic(efx); + fail1: + return rc; + } + + /* Called after previous invocation(s) of efx_stop_all, restarts the + * port, kernel transmit queue, NAPI processing and hardware interrupts, + * and ensures that the port is scheduled to be reconfigured. + * This function is safe to call multiple times when the NIC is in any + * state. */ + static void efx_start_all(struct efx_nic *efx) + { + struct efx_channel *channel; + + EFX_ASSERT_RESET_SERIALISED(efx); + + /* Check that it is appropriate to restart the interface. 
All + * of these flags are safe to read under just the rtnl lock */ + if (efx->port_enabled) + return; + if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) + return; + if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) + return; + + /* Mark the port as enabled so port reconfigurations can start, then + * restart the transmit interface early so the watchdog timer stops */ + efx_start_port(efx); + + if (efx_dev_registered(efx) && netif_device_present(efx->net_dev)) + netif_tx_wake_all_queues(efx->net_dev); + + efx_for_each_channel(channel, efx) + efx_start_channel(channel); + + if (efx->legacy_irq) + efx->legacy_irq_enabled = true; + efx_nic_enable_interrupts(efx); + + /* Switch to event based MCDI completions after enabling interrupts. + * If a reset has been scheduled, then we need to stay in polled mode. + * Rather than serialising efx_mcdi_mode_event() [which sleeps] and + * reset_pending [modified from an atomic context], we instead guarantee + * that efx_mcdi_mode_poll() isn't reverted erroneously */ + efx_mcdi_mode_event(efx); + if (efx->reset_pending) + efx_mcdi_mode_poll(efx); + + /* Start the hardware monitor if there is one. Otherwise (we're link + * event driven), we have to poll the PHY because after an event queue + * flush, we could have a missed a link state change */ + if (efx->type->monitor != NULL) { + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); + } else { + mutex_lock(&efx->mac_lock); + if (efx->phy_op->poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + } + + efx->type->start_stats(efx); + } + + /* Flush all delayed work. Should only be called when no more delayed work + * will be scheduled. This doesn't flush pending online resets (efx_reset), + * since we're holding the rtnl_lock at this point. */ + static void efx_flush_all(struct efx_nic *efx) + { + /* Make sure the hardware monitor is stopped */ + cancel_delayed_work_sync(&efx->monitor_work); + /* Stop scheduled port reconfigurations */ + cancel_work_sync(&efx->mac_work); + } + + /* Quiesce hardware and software without bringing the link down. + * Safe to call multiple times, when the nic and interface is in any + * state. The caller is guaranteed to subsequently be in a position + * to modify any hardware and software state they see fit without + * taking locks. */ + static void efx_stop_all(struct efx_nic *efx) + { + struct efx_channel *channel; + + EFX_ASSERT_RESET_SERIALISED(efx); + + /* port_enabled can be read safely under the rtnl lock */ + if (!efx->port_enabled) + return; + + efx->type->stop_stats(efx); + + /* Switch to MCDI polling on Siena before disabling interrupts */ + efx_mcdi_mode_poll(efx); + + /* Disable interrupts and wait for ISR to complete */ + efx_nic_disable_interrupts(efx); + if (efx->legacy_irq) { + synchronize_irq(efx->legacy_irq); + efx->legacy_irq_enabled = false; + } + efx_for_each_channel(channel, efx) { + if (channel->irq) + synchronize_irq(channel->irq); + } + + /* Stop all NAPI processing and synchronous rx refills */ + efx_for_each_channel(channel, efx) + efx_stop_channel(channel); + + /* Stop all asynchronous port reconfigurations. 
Since all + * event processing has already been stopped, there is no + * window to loose phy events */ + efx_stop_port(efx); + + /* Flush efx_mac_work(), refill_workqueue, monitor_work */ + efx_flush_all(efx); + + /* Stop the kernel transmit interface late, so the watchdog + * timer isn't ticking over the flush */ + if (efx_dev_registered(efx)) { + netif_tx_stop_all_queues(efx->net_dev); + netif_tx_lock_bh(efx->net_dev); + netif_tx_unlock_bh(efx->net_dev); + } + } + + static void efx_remove_all(struct efx_nic *efx) + { + efx_remove_filters(efx); + efx_remove_channels(efx); + efx_remove_port(efx); + efx_remove_nic(efx); + } + + /************************************************************************** + * + * Interrupt moderation + * + **************************************************************************/ + + static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution) + { + if (usecs == 0) + return 0; + if (usecs < resolution) + return 1; /* never round down to 0 */ + return usecs / resolution; + } + + /* Set interrupt moderation parameters */ + int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) + { + struct efx_channel *channel; + unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); + unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX) + return -EINVAL; + + if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && + !rx_may_override_tx) { + netif_err(efx, drv, efx->net_dev, "Channels are shared. " + "RX and TX IRQ moderation must be equal\n"); + return -EINVAL; + } + + efx->irq_rx_adaptive = rx_adaptive; + efx->irq_rx_moderation = rx_ticks; + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + channel->irq_moderation = rx_ticks; + else if (efx_channel_has_tx_queues(channel)) + channel->irq_moderation = tx_ticks; + } + + return 0; + } + + void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) + { + *rx_adaptive = efx->irq_rx_adaptive; + *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION; + + /* If channels are shared between RX and TX, so is IRQ + * moderation. Otherwise, IRQ moderation is the same for all + * TX channels and is not adaptive. + */ + if (efx->tx_channel_offset == 0) + *tx_usecs = *rx_usecs; + else + *tx_usecs = + efx->channel[efx->tx_channel_offset]->irq_moderation * + EFX_IRQ_MOD_RESOLUTION; + } + + /************************************************************************** + * + * Hardware monitor + * + **************************************************************************/ + + /* Run periodically off the general workqueue */ + static void efx_monitor(struct work_struct *data) + { + struct efx_nic *efx = container_of(data, struct efx_nic, + monitor_work.work); + + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); + BUG_ON(efx->type->monitor == NULL); + + /* If the mac_lock is already held then it is likely a port + * reconfiguration is already in place, which will likely do + * most of the work of monitor() anyway. 
*/ + if (mutex_trylock(&efx->mac_lock)) { + if (efx->port_enabled) + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); + } + + /************************************************************************** + * + * ioctls + * + *************************************************************************/ + + /* Net device ioctl + * Context: process, rtnl_lock() held. + */ + static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(ifr); + + EFX_ASSERT_RESET_SERIALISED(efx); + + /* Convert phy_id from older PRTAD/DEVAD format */ + if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && + (data->phy_id & 0xfc00) == 0x0400) + data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; + + return mdio_mii_ioctl(&efx->mdio, data, cmd); + } + + /************************************************************************** + * + * NAPI interface + * + **************************************************************************/ + + static void efx_init_napi(struct efx_nic *efx) + { + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + channel->napi_dev = efx->net_dev; + netif_napi_add(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); + } + } + + static void efx_fini_napi_channel(struct efx_channel *channel) + { + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); + channel->napi_dev = NULL; + } + + static void efx_fini_napi(struct efx_nic *efx) + { + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); + } + + /************************************************************************** + * + * Kernel netpoll interface + * + *************************************************************************/ + + #ifdef CONFIG_NET_POLL_CONTROLLER + + /* Although in the common case interrupts will be disabled, this is not + * guaranteed. However, all our work happens inside the NAPI callback, + * so no locking is required. + */ + static void efx_netpoll(struct net_device *net_dev) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_schedule_channel(channel); + } + + #endif + + /************************************************************************** + * + * Kernel net device interface + * + *************************************************************************/ + + /* Context: process, rtnl_lock() held. */ + static int efx_net_open(struct net_device *net_dev) + { + struct efx_nic *efx = netdev_priv(net_dev); + EFX_ASSERT_RESET_SERIALISED(efx); + + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + if (efx->state == STATE_DISABLED) + return -EIO; + if (efx->phy_mode & PHY_MODE_SPECIAL) + return -EBUSY; + if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) + return -EIO; + + /* Notify the kernel of the link state polled during driver load, + * before the monitor starts running */ + efx_link_status_changed(efx); + + efx_start_all(efx); + return 0; + } + + /* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really be a void. 
+ */ + static int efx_net_stop(struct net_device *net_dev) + { + struct efx_nic *efx = netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + + if (efx->state != STATE_DISABLED) { + /* Stop the device and flush all the channels */ + efx_stop_all(efx); + efx_fini_channels(efx); + efx_init_channels(efx); + } + + return 0; + } + + /* Context: process, dev_base_lock or RTNL held, non-blocking. */ + static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_mac_stats *mac_stats = &efx->mac_stats; + + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx); + spin_unlock_bh(&efx->stats_lock); + + stats->rx_packets = mac_stats->rx_packets; + stats->tx_packets = mac_stats->tx_packets; + stats->rx_bytes = mac_stats->rx_bytes; + stats->tx_bytes = mac_stats->tx_bytes; + stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; + stats->multicast = mac_stats->rx_multicast; + stats->collisions = mac_stats->tx_collision; + stats->rx_length_errors = (mac_stats->rx_gtjumbo + + mac_stats->rx_length_error); + stats->rx_crc_errors = mac_stats->rx_bad; + stats->rx_frame_errors = mac_stats->rx_align_error; + stats->rx_fifo_errors = mac_stats->rx_overflow; + stats->rx_missed_errors = mac_stats->rx_missed; + stats->tx_window_errors = mac_stats->tx_late_collision; + + stats->rx_errors = (stats->rx_length_errors + + stats->rx_crc_errors + + stats->rx_frame_errors + + mac_stats->rx_symbol_error); + stats->tx_errors = (stats->tx_window_errors + + mac_stats->tx_bad); + + return stats; + } + + /* Context: netif_tx_lock held, BHs disabled. */ + static void efx_watchdog(struct net_device *net_dev) + { + struct efx_nic *efx = netdev_priv(net_dev); + + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); + } + + + /* Context: process, rtnl_lock() held. */ + static int efx_change_mtu(struct net_device *net_dev, int new_mtu) + { + struct efx_nic *efx = netdev_priv(net_dev); + int rc = 0; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (new_mtu > EFX_MAX_MTU) + return -EINVAL; + + efx_stop_all(efx); + + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + + efx_fini_channels(efx); + + mutex_lock(&efx->mac_lock); + /* Reconfigure the MAC before enabling the dma queues so that + * the RX buffers don't overflow */ + net_dev->mtu = new_mtu; + efx->mac_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_init_channels(efx); + + efx_start_all(efx); + return rc; + } + + static int efx_set_mac_address(struct net_device *net_dev, void *data) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct sockaddr *addr = data; + char *new_addr = addr->sa_data; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (!is_valid_ether_addr(new_addr)) { + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); + return -EINVAL; + } + + memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); + + /* Reconfigure the MAC */ + mutex_lock(&efx->mac_lock); + efx->mac_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + return 0; + } + + /* Context: netif_addr_lock held, BHs disabled. 
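Before the function below, a hedged sketch of the hash-bit selection it performs for each multicast address: a little-endian CRC-32 of the MAC address, masked down to the hash width. The crc32_le() here reimplements the standard reflected CRC-32 used by the kernel's ether_crc_le(), and the 256-entry hash width is inferred from the driver comment about bit 0xff; this is not part of the patch:

#include <stdio.h>

#define ETH_ALEN                6
#define MCAST_HASH_ENTRIES      256u    /* inferred from the "bit 0xff" comment */

/* Reflected CRC-32 (poly 0xedb88320), initial value ~0, no final inversion */
static unsigned int crc32_le(const unsigned char *p, int len)
{
        unsigned int crc = 0xffffffffu;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

int main(void)
{
        const unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const unsigned char mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int crc;

        /* The driver comment quotes 0xbe2612ff for the broadcast address,
         * which selects bit 0xff of the hash table. */
        crc = crc32_le(bcast, ETH_ALEN);
        printf("broadcast: crc=%#x bit=%#x\n", crc, crc & (MCAST_HASH_ENTRIES - 1));

        crc = crc32_le(mcast, ETH_ALEN);
        printf("224.0.0.1: crc=%#x bit=%#x\n", crc, crc & (MCAST_HASH_ENTRIES - 1));
        return 0;
}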
*/ + static void efx_set_multicast_list(struct net_device *net_dev) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct netdev_hw_addr *ha; + union efx_multicast_hash *mc_hash = &efx->multicast_hash; + u32 crc; + int bit; + + efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); + + /* Build multicast hash table */ + if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { + memset(mc_hash, 0xff, sizeof(*mc_hash)); + } else { + memset(mc_hash, 0x00, sizeof(*mc_hash)); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); + set_bit_le(bit, mc_hash->byte); + } + + /* Broadcast packets go through the multicast hash filter. + * ether_crc_le() of the broadcast address is 0xbe2612ff + * so we always add bit 0xff to the mask. + */ + set_bit_le(0xff, mc_hash->byte); + } + + if (efx->port_enabled) + queue_work(efx->workqueue, &efx->mac_work); + /* Otherwise efx_start_port() will do this */ + } + + static int efx_set_features(struct net_device *net_dev, u32 data) + { + struct efx_nic *efx = netdev_priv(net_dev); + + /* If disabling RX n-tuple filtering, clear existing filters */ + if (net_dev->features & ~data & NETIF_F_NTUPLE) + efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + + return 0; + } + + static const struct net_device_ops efx_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_multicast_list, + .ndo_set_features = efx_set_features, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, + #endif + .ndo_setup_tc = efx_setup_tc, + #ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, + #endif + }; + + static void efx_update_name(struct efx_nic *efx) + { + strcpy(efx->name, efx->net_dev->name); + efx_mtd_rename(efx); + efx_set_channel_names(efx); + } + + static int efx_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) + { + struct net_device *net_dev = ptr; + + if (net_dev->netdev_ops == &efx_netdev_ops && + event == NETDEV_CHANGENAME) + efx_update_name(netdev_priv(net_dev)); + + return NOTIFY_DONE; + } + + static struct notifier_block efx_netdev_notifier = { + .notifier_call = efx_netdev_event, + }; + + static ssize_t + show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) + { + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + return sprintf(buf, "%d\n", efx->phy_type); + } + static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); + + static int efx_register_netdev(struct efx_nic *efx) + { + struct net_device *net_dev = efx->net_dev; + struct efx_channel *channel; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + net_dev->netdev_ops = &efx_netdev_ops; + SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + + /* Clear MAC statistics */ + efx->mac_op->update_stats(efx); + memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); + + rtnl_lock(); + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + efx_update_name(efx); + + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + efx_for_each_channel(channel, efx) { + struct efx_tx_queue *tx_queue; + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_init_tx_queue_core_txq(tx_queue); + } + + /* Always 
start with carrier off; PHY events will detect the link */ + netif_carrier_off(efx->net_dev); + + rtnl_unlock(); + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_registered; + } + + return 0; + + fail_locked: + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; + + fail_registered: + unregister_netdev(net_dev); + return rc; + } + + static void efx_unregister_netdev(struct efx_nic *efx) + { + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + if (!efx->net_dev) + return; + + BUG_ON(netdev_priv(efx->net_dev) != efx); + + /* Free up any skbs still remaining. This has to happen before + * we try to unregister the netdev as running their destructors + * may be needed to get the device ref. count to 0. */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_release_tx_buffers(tx_queue); + } + + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } + } + + /************************************************************************** + * + * Device reset and suspend + * + **************************************************************************/ + + /* Tears down the entire software state and most of the hardware state + * before reset. */ + void efx_reset_down(struct efx_nic *efx, enum reset_type method) + { + EFX_ASSERT_RESET_SERIALISED(efx); + + efx_stop_all(efx); + mutex_lock(&efx->mac_lock); + + efx_fini_channels(efx); + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) + efx->phy_op->fini(efx); + efx->type->fini(efx); + } + + /* This function will always ensure that the locks acquired in + * efx_reset_down() are released. A failure return code indicates + * that we were unable to reinitialise the hardware, and the + * driver should be disabled. If ok is false, then the rx and tx + * engines are not restarted, pending a RESET_DISABLE. */ + int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) + { + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); + goto fail; + } + + if (!ok) + goto fail; + + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { + rc = efx->phy_op->init(efx); + if (rc) + goto fail; + if (efx->phy_op->reconfigure(efx)) + netif_err(efx, drv, efx->net_dev, + "could not restore PHY settings\n"); + } + + efx->mac_op->reconfigure(efx); + + efx_init_channels(efx); + efx_restore_filters(efx); + + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + return 0; + + fail: + efx->port_initialized = false; + + mutex_unlock(&efx->mac_lock); + + return rc; + } + + /* Reset the NIC using the specified method. Note that the reset may + * fail, in which case the card will be left in an unusable state. + * + * Caller must hold the rtnl_lock. + */ + int efx_reset(struct efx_nic *efx, enum reset_type method) + { + int rc, rc2; + bool disabled; + + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); + + netif_device_detach(efx->net_dev); + efx_reset_down(efx, method); + + rc = efx->type->reset(efx, method); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); + goto out; + } + + /* Clear flags for the scopes we covered. 
We assume the NIC and + * driver are now quiescent so that there is no race here. + */ + efx->reset_pending &= -(1 << (method + 1)); + + /* Reinitialise bus-mastering, which may have been turned off before + * the reset was scheduled. This is still appropriate, even in the + * RESET_TYPE_DISABLE since this driver generally assumes the hardware + * can respond to requests. */ + pci_set_master(efx->pci_dev); + + out: + /* Leave device stopped if necessary */ + disabled = rc || method == RESET_TYPE_DISABLE; + rc2 = efx_reset_up(efx, method, !disabled); + if (rc2) { + disabled = true; + if (!rc) + rc = rc2; + } + + if (disabled) { + dev_close(efx->net_dev); + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); + efx->state = STATE_DISABLED; + } else { + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); + netif_device_attach(efx->net_dev); + } + return rc; + } + + /* The worker thread exists so that code that cannot sleep can + * schedule a reset for later. + */ + static void efx_reset_work(struct work_struct *data) + { + struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); + unsigned long pending = ACCESS_ONCE(efx->reset_pending); + + if (!pending) + return; + + /* If we're not RUNNING then don't reset. Leave the reset_pending + * flags set so that efx_pci_probe_main will be retried */ + if (efx->state != STATE_RUNNING) { + netif_info(efx, drv, efx->net_dev, + "scheduled reset quenched. NIC not RUNNING\n"); + return; + } + + rtnl_lock(); + (void)efx_reset(efx, fls(pending) - 1); + rtnl_unlock(); + } + + void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) + { + enum reset_type method; + + switch (type) { + case RESET_TYPE_INVISIBLE: + case RESET_TYPE_ALL: + case RESET_TYPE_WORLD: + case RESET_TYPE_DISABLE: + method = type; + netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", + RESET_TYPE(method)); + break; + default: + method = efx->type->map_reset_reason(type); + netif_dbg(efx, drv, efx->net_dev, + "scheduling %s reset for %s\n", + RESET_TYPE(method), RESET_TYPE(type)); + break; + } + + set_bit(method, &efx->reset_pending); + + /* efx_process_channel() will no longer read events once a + * reset is scheduled. So switch back to poll'd MCDI completions. 
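A compact illustration of the reset_pending bookkeeping used above: efx_schedule_reset() sets bit 'method', efx_reset_work() services the highest pending bit via fls(), and after a reset efx_reset() clears that scope and every narrower one with the -(1 << (method + 1)) mask. This is a standalone sketch with a local fls() stand-in, not part of the patch:

#include <stdio.h>

enum reset_type {
        RESET_TYPE_INVISIBLE,
        RESET_TYPE_ALL,
        RESET_TYPE_WORLD,
        RESET_TYPE_DISABLE,
};

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based */
static int fls_demo(unsigned long x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned long pending = 0;
        int method;

        pending |= 1UL << RESET_TYPE_INVISIBLE;
        pending |= 1UL << RESET_TYPE_ALL;

        method = fls_demo(pending) - 1;         /* highest pending: RESET_TYPE_ALL */
        printf("servicing reset type %d\n", method);

        /* Clear the scope we covered and everything narrower than it */
        pending &= -(1UL << (method + 1));
        printf("still pending: %#lx\n", pending);       /* 0 here */
        return 0;
}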
*/ + efx_mcdi_mode_poll(efx); + + queue_work(reset_workqueue, &efx->reset_work); + } + + /************************************************************************** + * + * List of NICs we support + * + **************************************************************************/ + + /* PCI device ID table */ + static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { - {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), ++ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), + .driver_data = (unsigned long) &falcon_a1_nic_type}, - {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), ++ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000B), + .driver_data = (unsigned long) &falcon_b0_nic_type}, - {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), ++ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID), + .driver_data = (unsigned long) &siena_a0_nic_type}, - {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), ++ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID), + .driver_data = (unsigned long) &siena_a0_nic_type}, + {0} /* end of list */ + }; + + /************************************************************************** + * + * Dummy PHY/MAC operations + * + * Can be used for some unimplemented operations + * Needed so all function pointers are valid and do not have to be tested + * before use + * + **************************************************************************/ + int efx_port_dummy_op_int(struct efx_nic *efx) + { + return 0; + } + void efx_port_dummy_op_void(struct efx_nic *efx) {} + + static bool efx_port_dummy_op_poll(struct efx_nic *efx) + { + return false; + } + + static const struct efx_phy_operations efx_dummy_phy_operations = { + .init = efx_port_dummy_op_int, + .reconfigure = efx_port_dummy_op_int, + .poll = efx_port_dummy_op_poll, + .fini = efx_port_dummy_op_void, + }; + + /************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + + /* This zeroes out and then fills in the invariants in a struct + * efx_nic (including all sub-structures). + */ + static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, + struct pci_dev *pci_dev, struct net_device *net_dev) + { + int i; + + /* Initialise common structures */ + memset(efx, 0, sizeof(*efx)); + spin_lock_init(&efx->biu_lock); + #ifdef CONFIG_SFC_MTD + INIT_LIST_HEAD(&efx->mtd_list); + #endif + INIT_WORK(&efx->reset_work, efx_reset_work); + INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); + efx->pci_dev = pci_dev; + efx->msg_enable = debug; + efx->state = STATE_INIT; + strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); + + efx->net_dev = net_dev; + spin_lock_init(&efx->stats_lock); + mutex_init(&efx->mac_lock); + efx->mac_op = type->default_mac_ops; + efx->phy_op = &efx_dummy_phy_operations; + efx->mdio.dev = net_dev; + INIT_WORK(&efx->mac_work, efx_mac_work); + + for (i = 0; i < EFX_MAX_CHANNELS; i++) { + efx->channel[i] = efx_alloc_channel(efx, i, NULL); + if (!efx->channel[i]) + goto fail; + } + + efx->type = type; + + EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); + + /* Higher numbered interrupt modes are less capable! 
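A one-line illustration of the max() on the next line, using the numbering given by the interrupt_mode module parameter later in this diff (0=>MSIX, 1=>MSI, 2=>legacy): because higher numbers are less capable, taking the maximum of the NIC's limit and the user's request honours whichever constraint is stricter. Sketch only, with a local max helper:

#include <stdio.h>

enum efx_int_mode {
        EFX_INT_MODE_MSIX = 0,
        EFX_INT_MODE_MSI = 1,
        EFX_INT_MODE_LEGACY = 2,
};

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

int main(void)
{
        int nic_limit = EFX_INT_MODE_MSI;       /* assume the NIC cannot do MSI-X */
        int requested = EFX_INT_MODE_MSIX;      /* module parameter default */

        /* Ends up with MSI: the less capable (higher numbered) of the two */
        printf("interrupt mode: %d\n", max_int(nic_limit, requested));
        return 0;
}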
*/ + efx->interrupt_mode = max(efx->type->max_interrupt_mode, + interrupt_mode); + + /* Would be good to use the net_dev name, but we're too early */ + snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", + pci_name(pci_dev)); + efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); + if (!efx->workqueue) + goto fail; + + return 0; + + fail: + efx_fini_struct(efx); + return -ENOMEM; + } + + static void efx_fini_struct(struct efx_nic *efx) + { + int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) + kfree(efx->channel[i]); + + if (efx->workqueue) { + destroy_workqueue(efx->workqueue); + efx->workqueue = NULL; + } + } + + /************************************************************************** + * + * PCI interface + * + **************************************************************************/ + + /* Main body of final NIC shutdown code + * This is called only at module unload (or hotplug removal). + */ + static void efx_pci_remove_main(struct efx_nic *efx) + { + #ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; + #endif + efx_nic_fini_interrupt(efx); + efx_fini_channels(efx); + efx_fini_port(efx); + efx->type->fini(efx); + efx_fini_napi(efx); + efx_remove_all(efx); + } + + /* Final NIC shutdown + * This is called only at module unload (or hotplug removal). + */ + static void efx_pci_remove(struct pci_dev *pci_dev) + { + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + /* Mark the NIC as fini, then stop the interface */ + rtnl_lock(); + efx->state = STATE_FINI; + dev_close(efx->net_dev); + + /* Allow any queued efx_resets() to complete */ + rtnl_unlock(); + + efx_unregister_netdev(efx); + + efx_mtd_remove(efx); + + /* Wait for any scheduled resets to complete. No more will be + * scheduled from this point because efx_stop_all() has been + * called, we are no longer registered with driverlink, and + * the net_device's have been removed. */ + cancel_work_sync(&efx->reset_work); + + efx_pci_remove_main(efx); + + efx_fini_io(efx); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + + pci_set_drvdata(pci_dev, NULL); + efx_fini_struct(efx); + free_netdev(efx->net_dev); + }; + + /* Main body of NIC initialisation + * This is called at module load (or hotplug insertion, theoretically). + */ + static int efx_pci_probe_main(struct efx_nic *efx) + { + int rc; + + /* Do start-of-day initialisation */ + rc = efx_probe_all(efx); + if (rc) + goto fail1; + + efx_init_napi(efx); + + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise NIC\n"); + goto fail3; + } + + rc = efx_init_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise port\n"); + goto fail4; + } + + efx_init_channels(efx); + + rc = efx_nic_init_interrupt(efx); + if (rc) + goto fail5; + + return 0; + + fail5: + efx_fini_channels(efx); + efx_fini_port(efx); + fail4: + efx->type->fini(efx); + fail3: + efx_fini_napi(efx); + efx_remove_all(efx); + fail1: + return rc; + } + + /* NIC initialisation + * + * This is called at module load (or hotplug insertion, + * theoretically). It sets up PCI mappings, tests and resets the NIC, + * sets up and registers the network devices with the kernel and hooks + * the interrupt service routine. It does not prepare the device for + * transmission; this is left to the first time one of the network + * interfaces is brought up (i.e. efx_net_open). 
+ */ + static int __devinit efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) + { + const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; + struct net_device *net_dev; + struct efx_nic *efx; + int i, rc; + + /* Allocate and initialise a struct net_device and struct efx_nic */ + net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, + EFX_MAX_RX_QUEUES); + if (!net_dev) + return -ENOMEM; + net_dev->features |= (type->offload_features | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_TSO | + NETIF_F_RXCSUM); + if (type->offload_features & NETIF_F_V6_CSUM) + net_dev->features |= NETIF_F_TSO6; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + /* All offloads can be toggled */ + net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; + efx = netdev_priv(net_dev); + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_init_struct(efx, type, pci_dev, net_dev); + if (rc) + goto fail1; + + netif_info(efx, probe, efx->net_dev, + "Solarflare NIC detected\n"); + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx); + if (rc) + goto fail2; + + /* No serialisation is required with the reset path because + * we're in STATE_INIT. */ + for (i = 0; i < 5; i++) { + rc = efx_pci_probe_main(efx); + + /* Serialise against efx_reset(). No more resets will be + * scheduled since efx_stop_all() has been called, and we + * have not and never have been registered with either + * the rtnetlink or driverlink layers. */ + cancel_work_sync(&efx->reset_work); + + if (rc == 0) { + if (efx->reset_pending) { + /* If there was a scheduled reset during + * probe, the NIC is probably hosed anyway */ + efx_pci_remove_main(efx); + rc = -EIO; + } else { + break; + } + } + + /* Retry if a recoverably reset event has been scheduled */ + if (efx->reset_pending & + ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) || + !efx->reset_pending) + goto fail3; + + efx->reset_pending = 0; + } + + if (rc) { + netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); + goto fail4; + } + + /* Switch to the running state before we expose the device to the OS, + * so that dev_open()|efx_start_all() will actually start the device */ + efx->state = STATE_RUNNING; + + rc = efx_register_netdev(efx); + if (rc) + goto fail5; + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + + rtnl_lock(); + efx_mtd_probe(efx); /* allowed to fail */ + rtnl_unlock(); + return 0; + + fail5: + efx_pci_remove_main(efx); + fail4: + fail3: + efx_fini_io(efx); + fail2: + efx_fini_struct(efx); + fail1: + WARN_ON(rc > 0); + netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); + free_netdev(net_dev); + return rc; + } + + static int efx_pm_freeze(struct device *dev) + { + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + efx->state = STATE_FINI; + + netif_device_detach(efx->net_dev); + + efx_stop_all(efx); + efx_fini_channels(efx); + + return 0; + } + + static int efx_pm_thaw(struct device *dev) + { + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + efx->state = STATE_INIT; + + efx_init_channels(efx); + + mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + netif_device_attach(efx->net_dev); + + efx->state = STATE_RUNNING; + + efx->type->resume_wol(efx); + + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + queue_work(reset_workqueue, &efx->reset_work); + + return 0; + } + + static int efx_pm_poweroff(struct device *dev) + { + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + efx->type->fini(efx); + + efx->reset_pending = 0; + + pci_save_state(pci_dev); + return pci_set_power_state(pci_dev, PCI_D3hot); + } + + /* Used for both resume and restore */ + static int efx_pm_resume(struct device *dev) + { + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + int rc; + + rc = pci_set_power_state(pci_dev, PCI_D0); + if (rc) + return rc; + pci_restore_state(pci_dev); + rc = pci_enable_device(pci_dev); + if (rc) + return rc; + pci_set_master(efx->pci_dev); + rc = efx->type->reset(efx, RESET_TYPE_ALL); + if (rc) + return rc; + rc = efx->type->init(efx); + if (rc) + return rc; + efx_pm_thaw(dev); + return 0; + } + + static int efx_pm_suspend(struct device *dev) + { + int rc; + + efx_pm_freeze(dev); + rc = efx_pm_poweroff(dev); + if (rc) + efx_pm_resume(dev); + return rc; + } + + static struct dev_pm_ops efx_pm_ops = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, + }; + + static struct pci_driver efx_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = efx_pci_table, + .probe = efx_pci_probe, + .remove = efx_pci_remove, + .driver.pm = &efx_pm_ops, + }; + + /************************************************************************** + * + * Kernel module interface + * + *************************************************************************/ + + module_param(interrupt_mode, uint, 0444); + MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); + + static int __init efx_init_module(void) + { + int rc; + + printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); + + rc = register_netdevice_notifier(&efx_netdev_notifier); + if (rc) + goto err_notifier; + + reset_workqueue = create_singlethread_workqueue("sfc_reset"); + if (!reset_workqueue) { + rc = -ENOMEM; + goto err_reset; + } + + rc = pci_register_driver(&efx_pci_driver); + if (rc < 0) + goto err_pci; + + return 0; + + err_pci: + destroy_workqueue(reset_workqueue); + err_reset: + unregister_netdevice_notifier(&efx_netdev_notifier); + err_notifier: + return rc; + } + + static void __exit efx_exit_module(void) + { + printk(KERN_INFO "Solarflare NET driver unloading\n"); + + pci_unregister_driver(&efx_pci_driver); + destroy_workqueue(reset_workqueue); + unregister_netdevice_notifier(&efx_netdev_notifier); + + } + + module_init(efx_init_module); + module_exit(efx_exit_module); + + MODULE_AUTHOR("Solarflare Communications and " + "Michael Brown 
mbrown@fensystems.co.uk"); + MODULE_DESCRIPTION("Solarflare Communications network driver"); + MODULE_LICENSE("GPL"); + MODULE_DEVICE_TABLE(pci, efx_pci_table); diff --combined drivers/net/ethernet/sfc/efx.h index 0000000,442f4d0..4764793 mode 000000,100644..100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@@ -1,0 -1,150 +1,146 @@@ + /**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2010 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + + #ifndef EFX_EFX_H + #define EFX_EFX_H + + #include "net_driver.h" + #include "filter.h" + + /* PCI IDs */ -#define EFX_VENDID_SFC 0x1924 -#define FALCON_A_P_DEVID 0x0703 -#define FALCON_A_S_DEVID 0x6703 -#define FALCON_B_P_DEVID 0x0710 + #define BETHPAGE_A_P_DEVID 0x0803 + #define SIENA_A_P_DEVID 0x0813 + + /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ + #define EFX_MEM_BAR 2 + + /* TX */ + extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); + extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); + extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); + extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); + extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); + extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); + extern netdev_tx_t + efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); + extern netdev_tx_t + efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); + extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); + + /* RX */ + extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); + extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); + extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); + extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); + extern void efx_rx_strategy(struct efx_channel *channel); + extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); + extern void efx_rx_slow_fill(unsigned long context); + extern void __efx_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, bool checksummed); + extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int len, bool checksummed, bool discard); + extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); + + #define EFX_MAX_DMAQ_SIZE 4096UL + #define EFX_DEFAULT_DMAQ_SIZE 1024UL + #define EFX_MIN_DMAQ_SIZE 512UL + + #define EFX_MAX_EVQ_SIZE 16384UL + #define EFX_MIN_EVQ_SIZE 512UL + + /* The smallest [rt]xq_entries that the driver supports. Callers of + * efx_wake_queue() assume that they can subsequently send at least one + * skb. Falcon/A1 may require up to three descriptors per skb_frag. 
*/ + #define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) + + /* Filters */ + extern int efx_probe_filters(struct efx_nic *efx); + extern void efx_restore_filters(struct efx_nic *efx); + extern void efx_remove_filters(struct efx_nic *efx); + extern int efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace); + extern int efx_filter_remove_filter(struct efx_nic *efx, + struct efx_filter_spec *spec); + extern void efx_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); + #ifdef CONFIG_RFS_ACCEL + extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); + extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); + static inline void efx_filter_rfs_expire(struct efx_channel *channel) + { + if (channel->rfs_filters_added >= 60 && + __efx_filter_rfs_expire(channel->efx, 100)) + channel->rfs_filters_added -= 60; + } + #define efx_filter_rfs_enabled() 1 + #else + static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} + #define efx_filter_rfs_enabled() 0 + #endif + + /* Channels */ + extern void efx_process_channel_now(struct efx_channel *channel); + extern int + efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); + + /* Ports */ + extern int efx_reconfigure_port(struct efx_nic *efx); + extern int __efx_reconfigure_port(struct efx_nic *efx); + + /* Ethtool support */ + extern const struct ethtool_ops efx_ethtool_ops; + + /* Reset handling */ + extern int efx_reset(struct efx_nic *efx, enum reset_type method); + extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); + extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); + + /* Global */ + extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); + extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); + extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); + + /* Dummy PHY ops for PHY drivers */ + extern int efx_port_dummy_op_int(struct efx_nic *efx); + extern void efx_port_dummy_op_void(struct efx_nic *efx); + + + /* MTD */ + #ifdef CONFIG_SFC_MTD + extern int efx_mtd_probe(struct efx_nic *efx); + extern void efx_mtd_rename(struct efx_nic *efx); + extern void efx_mtd_remove(struct efx_nic *efx); + #else + static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } + static inline void efx_mtd_rename(struct efx_nic *efx) {} + static inline void efx_mtd_remove(struct efx_nic *efx) {} + #endif + + static inline void efx_schedule_channel(struct efx_channel *channel) + { + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + channel->work_pending = true; + + napi_schedule(&channel->napi_str); + } + + extern void efx_link_status_changed(struct efx_nic *efx); + extern void efx_link_set_advertising(struct efx_nic *efx, u32); + extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); + + #endif /* EFX_EFX_H */ diff --combined drivers/net/ethernet/sfc/falcon.c index 0000000,4dd1748..97b606b mode 000000,100644..100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@@ -1,0 -1,1843 +1,1844 @@@ + /**************************************************************************** + * Driver for Solarflare Solarstorm 
network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2010 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + + #include <linux/bitops.h> + #include <linux/delay.h> + #include <linux/pci.h> + #include <linux/module.h> + #include <linux/seq_file.h> + #include <linux/i2c.h> + #include <linux/mii.h> + #include <linux/slab.h> + #include "net_driver.h" + #include "bitfield.h" + #include "efx.h" + #include "mac.h" + #include "spi.h" + #include "nic.h" + #include "regs.h" + #include "io.h" + #include "phy.h" + #include "workarounds.h" + + /* Hardware control for SFC4000 (aka Falcon). */ + + static const unsigned int + /* "Large" EEPROM device: Atmel AT25640 or similar + * 8 KB, 16-bit address, 32 B write block */ + large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN) + | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN) + | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)), + /* Default flash device: Atmel AT25F1024 + * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */ + default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN) + | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) + | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) + | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) + | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); + + /************************************************************************** + * + * I2C bus - this is a bit-bashing interface using GPIO pins + * Note that it uses the output enables to tristate the outputs + * SDA is the data pin and SCL is the clock + * + ************************************************************************** + */ + static void falcon_setsda(void *data, int state) + { + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); + efx_writeo(efx, ®, FR_AB_GPIO_CTL); + } + + static void falcon_setscl(void *data, int state) + { + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); + efx_writeo(efx, ®, FR_AB_GPIO_CTL); + } + + static int falcon_getsda(void *data) + { + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); + } + + static int falcon_getscl(void *data) + { + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); + } + + static struct i2c_algo_bit_data falcon_i2c_bit_operations = { + .setsda = falcon_setsda, + .setscl = falcon_setscl, + .getsda = falcon_getsda, + .getscl = falcon_getscl, + .udelay = 5, + /* Wait up to 50 ms for slave to let us pull SCL high */ + .timeout = DIV_ROUND_UP(HZ, 20), + }; + + static void falcon_push_irq_moderation(struct efx_channel *channel) + { + efx_dword_t timer_cmd; + struct efx_nic *efx = channel->efx; + + BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH)); + + /* Set timer register */ + if (channel->irq_moderation) { + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_AB_TC_TIMER_MODE, + FFE_BB_TIMER_MODE_INT_HLDOFF, + FRF_AB_TC_TIMER_VAL, + channel->irq_moderation - 1); + } else { + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_AB_TC_TIMER_MODE, + FFE_BB_TIMER_MODE_DIS, + FRF_AB_TC_TIMER_VAL, 0); + } + BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER 
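The four GPIO callbacks above give the i2c-algo-bit layer a software (bit-banged) I2C bus; falcon_probe_nic() later copies falcon_i2c_bit_operations into the board state and registers the adapter. The fragment below sketches that registration step on its own, with a hypothetical foo_ helper; the only kernel APIs assumed are struct i2c_algo_bit_data and i2c_bit_add_bus().

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/module.h>
#include <linux/string.h>

/* Illustrative helper: attach a bit-banging callback table (such as
 * falcon_i2c_bit_operations above) to an I2C adapter.  "priv" is
 * whatever the setsda/setscl/getsda/getscl callbacks expect in their
 * "data" argument; for this driver it is the struct efx_nic. */
static int foo_register_bitbang_i2c(struct i2c_adapter *adap,
				    struct i2c_algo_bit_data *bit_data,
				    const struct i2c_algo_bit_data *ops,
				    void *priv, struct device *parent)
{
	*bit_data = *ops;		/* copy callbacks, udelay, timeout */
	bit_data->data = priv;		/* handed back to every callback */

	adap->owner = THIS_MODULE;
	adap->algo_data = bit_data;
	adap->dev.parent = parent;
	strlcpy(adap->name, "foo GPIO", sizeof(adap->name));

	/* Registers the adapter; transfers now go through the
	 * bit-banging algorithm and the callbacks above. */
	return i2c_bit_add_bus(adap);
}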
!= FR_BZ_TIMER_COMMAND_P0); + efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, + channel->channel); + } + + static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); + + static void falcon_prepare_flush(struct efx_nic *efx) + { + falcon_deconfigure_mac_wrapper(efx); + + /* Wait for the tx and rx fifo's to get to the next packet boundary + * (~1ms without back-pressure), then to drain the remainder of the + * fifo's at data path speeds (negligible), with a healthy margin. */ + msleep(10); + } + + /* Acknowledge a legacy interrupt from Falcon + * + * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. + * + * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the + * BIU. Interrupt acknowledge is read sensitive so must write instead + * (then read to ensure the BIU collector is flushed) + * + * NB most hardware supports MSI interrupts + */ + inline void falcon_irq_ack_a1(struct efx_nic *efx) + { + efx_dword_t reg; + + EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); + efx_writed(efx, ®, FR_AA_INT_ACK_KER); + efx_readd(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS); + } + + + irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) + { + struct efx_nic *efx = dev_id; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + int queues; + + /* Check to see if this is our interrupt. If it isn't, we + * exit without having touched the hardware. + */ + if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d not for me\n", irq, + raw_smp_processor_id()); + return IRQ_NONE; + } + efx->last_irq_cpu = raw_smp_processor_id(); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + /* Determine interrupting queues, clear interrupt status + * register and acknowledge the device interrupt. + */ + BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); + queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); + + /* Check to see if we have a serious error condition */ + if (queues & (1U << efx->fatal_irq_level)) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_nic_fatal_interrupt(efx); + } + + EFX_ZERO_OWORD(*int_ker); + wmb(); /* Ensure the vector is cleared before interrupt ack */ + falcon_irq_ack_a1(efx); + + if (queues & 1) + efx_schedule_channel(efx_get_channel(efx, 0)); + if (queues & 2) + efx_schedule_channel(efx_get_channel(efx, 1)); + return IRQ_HANDLED; + } + /************************************************************************** + * + * EEPROM/flash + * + ************************************************************************** + */ + + #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) + + static int falcon_spi_poll(struct efx_nic *efx) + { + efx_oword_t reg; + efx_reado(efx, ®, FR_AB_EE_SPI_HCMD); + return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; + } + + /* Wait for SPI command completion */ + static int falcon_spi_wait(struct efx_nic *efx) + { + /* Most commands will finish quickly, so we start polling at + * very short intervals. Sometimes the command may have to + * wait for VPD or expansion ROM access outside of our + * control, so we allow up to 100 ms. 
*/ + unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10); + int i; + + for (i = 0; i < 10; i++) { + if (!falcon_spi_poll(efx)) + return 0; + udelay(10); + } + + for (;;) { + if (!falcon_spi_poll(efx)) + return 0; + if (time_after_eq(jiffies, timeout)) { + netif_err(efx, hw, efx->net_dev, + "timed out waiting for SPI\n"); + return -ETIMEDOUT; + } + schedule_timeout_uninterruptible(1); + } + } + + int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, + unsigned int command, int address, + const void *in, void *out, size_t len) + { + bool addressed = (address >= 0); + bool reading = (out != NULL); + efx_oword_t reg; + int rc; + + /* Input validation */ + if (len > FALCON_SPI_MAX_LEN) + return -EINVAL; + + /* Check that previous command is not still running */ + rc = falcon_spi_poll(efx); + if (rc) + return rc; + + /* Program address register, if we have an address */ + if (addressed) { + EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address); + efx_writeo(efx, ®, FR_AB_EE_SPI_HADR); + } + + /* Program data register, if we have data */ + if (in != NULL) { + memcpy(®, in, len); + efx_writeo(efx, ®, FR_AB_EE_SPI_HDATA); + } + + /* Issue read/write command */ + EFX_POPULATE_OWORD_7(reg, + FRF_AB_EE_SPI_HCMD_CMD_EN, 1, + FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id, + FRF_AB_EE_SPI_HCMD_DABCNT, len, + FRF_AB_EE_SPI_HCMD_READ, reading, + FRF_AB_EE_SPI_HCMD_DUBCNT, 0, + FRF_AB_EE_SPI_HCMD_ADBCNT, + (addressed ? spi->addr_len : 0), + FRF_AB_EE_SPI_HCMD_ENC, command); + efx_writeo(efx, ®, FR_AB_EE_SPI_HCMD); + + /* Wait for read/write to complete */ + rc = falcon_spi_wait(efx); + if (rc) + return rc; + + /* Read data */ + if (out != NULL) { + efx_reado(efx, ®, FR_AB_EE_SPI_HDATA); + memcpy(out, ®, len); + } + + return 0; + } + + static size_t + falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start) + { + return min(FALCON_SPI_MAX_LEN, + (spi->block_size - (start & (spi->block_size - 1)))); + } + + static inline u8 + efx_spi_munge_command(const struct efx_spi_device *spi, + const u8 command, const unsigned int address) + { + return command | (((address >> 8) & spi->munge_address) << 3); + } + + /* Wait up to 10 ms for buffered write completion */ + int + falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) + { + unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); + u8 status; + int rc; + + for (;;) { + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + if (!(status & SPI_STATUS_NRDY)) + return 0; + if (time_after_eq(jiffies, timeout)) { + netif_err(efx, hw, efx->net_dev, + "SPI write timeout on device %d" + " last status=0x%02x\n", + spi->device_id, status); + return -ETIMEDOUT; + } + schedule_timeout_uninterruptible(1); + } + } + + int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, + loff_t start, size_t len, size_t *retlen, u8 *buffer) + { + size_t block_len, pos = 0; + unsigned int command; + int rc = 0; + + while (pos < len) { + block_len = min(len - pos, FALCON_SPI_MAX_LEN); + + command = efx_spi_munge_command(spi, SPI_READ, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, + buffer + pos, block_len); + if (rc) + break; + pos += block_len; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + } + + if (retlen) + *retlen = pos; + return rc; + } + + int + falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, + loff_t start, size_t len, size_t 
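falcon_spi_wait() above uses a common two-phase poll: a brief busy-wait with udelay() for the fast case, then a sleeping poll bounded by a jiffies deadline. A generic sketch of the pattern follows; foo_done() is a hypothetical stand-in for the hardware-ready test (falcon_spi_poll() here).

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Stub standing in for a hardware-ready test such as falcon_spi_poll(). */
static bool foo_done(void)
{
	return false;
}

static int foo_poll_timeout(void)
{
	/* Allow 100 ms in total; the "+ 1" guards against the deadline
	 * being one tick too short if jiffies advances immediately. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	/* Phase 1: short busy-wait, covers the common fast completion. */
	for (i = 0; i < 10; i++) {
		if (foo_done())
			return 0;
		udelay(10);
	}

	/* Phase 2: sleeping poll until the deadline expires. */
	for (;;) {
		if (foo_done())
			return 0;
		if (time_after_eq(jiffies, timeout))
			return -ETIMEDOUT;
		schedule_timeout_uninterruptible(1);
	}
}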
*retlen, const u8 *buffer) + { + u8 verify_buffer[FALCON_SPI_MAX_LEN]; + size_t block_len, pos = 0; + unsigned int command; + int rc = 0; + + while (pos < len) { + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + break; + + block_len = min(len - pos, + falcon_spi_write_limit(spi, start + pos)); + command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, + buffer + pos, NULL, block_len); + if (rc) + break; + + rc = falcon_spi_wait_write(efx, spi); + if (rc) + break; + + command = efx_spi_munge_command(spi, SPI_READ, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, + NULL, verify_buffer, block_len); + if (memcmp(verify_buffer, buffer + pos, block_len)) { + rc = -EIO; + break; + } + + pos += block_len; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + } + + if (retlen) + *retlen = pos; + return rc; + } + + /************************************************************************** + * + * MAC wrapper + * + ************************************************************************** + */ + + static void falcon_push_multicast_hash(struct efx_nic *efx) + { + union efx_multicast_hash *mc_hash = &efx->multicast_hash; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); + efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); + } + + static void falcon_reset_macs(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg, mac_ctrl; + int count; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { + /* It's not safe to use GLB_CTL_REG to reset the + * macs, so instead use the internal MAC resets + */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); + efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); + + for (count = 0; count < 10000; count++) { + efx_reado(efx, ®, FR_AB_XM_GLB_CFG); + if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == + 0) + return; + udelay(10); + } + + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XMAC core reset\n"); + } + + /* Mac stats will fail whist the TX fifo is draining */ + WARN_ON(nic_data->stats_disable_count == 0); + + efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL); + EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1); + efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); + + efx_reado(efx, ®, FR_AB_GLB_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); + EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); + EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); + efx_writeo(efx, ®, FR_AB_GLB_CTL); + + count = 0; + while (1) { + efx_reado(efx, ®, FR_AB_GLB_CTL); + if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && + !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && + !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { + netif_dbg(efx, hw, efx->net_dev, + "Completed MAC reset after %d loops\n", + count); + break; + } + if (count > 20) { + netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); + break; + } + count++; + udelay(10); + } + + /* Ensure the correct MAC is selected before statistics + * are re-enabled by the caller */ + efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); + + falcon_setup_xaui(efx); + } + + void falcon_drain_tx_fifo(struct efx_nic *efx) + { + efx_oword_t reg; + + if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || + (efx->loopback_mode != LOOPBACK_NONE)) + return; + + efx_reado(efx, ®, FR_AB_MAC_CTRL); + /* There is no point in draining more than once */ + if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) + return; + + falcon_reset_macs(efx); + 
} + + static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) + { + efx_oword_t reg; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return; + + /* Isolate the MAC -> RX */ + efx_reado(efx, ®, FR_AZ_RX_CFG); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); + efx_writeo(efx, ®, FR_AZ_RX_CFG); + + /* Isolate TX -> MAC */ + falcon_drain_tx_fifo(efx); + } + + void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) + { + struct efx_link_state *link_state = &efx->link_state; + efx_oword_t reg; + int link_speed, isolate; + + isolate = !!ACCESS_ONCE(efx->reset_pending); + + switch (link_state->speed) { + case 10000: link_speed = 3; break; + case 1000: link_speed = 2; break; + case 100: link_speed = 1; break; + default: link_speed = 0; break; + } + /* MAC_LINK_STATUS controls MAC backpressure but doesn't work + * as advertised. Disable to ensure packets are not + * indefinitely held and TX queue can be flushed at any point + * while the link is down. */ + EFX_POPULATE_OWORD_5(reg, + FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, + FRF_AB_MAC_BCAD_ACPT, 1, + FRF_AB_MAC_UC_PROM, efx->promiscuous, + FRF_AB_MAC_LINK_STATUS, 1, /* always set */ + FRF_AB_MAC_SPEED, link_speed); + /* On B0, MAC backpressure can be disabled and packets get + * discarded. */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, + !link_state->up || isolate); + } + + efx_writeo(efx, ®, FR_AB_MAC_CTRL); + + /* Restore the multicast hash registers. */ + falcon_push_multicast_hash(efx); + + efx_reado(efx, ®, FR_AZ_RX_CFG); + /* Enable XOFF signal from RX FIFO (we enabled it during NIC + * initialisation but it may read back as 0) */ + EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); + /* Unisolate the MAC -> RX */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate); + efx_writeo(efx, ®, FR_AZ_RX_CFG); + } + + static void falcon_stats_request(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + WARN_ON(nic_data->stats_pending); + WARN_ON(nic_data->stats_disable_count); + + if (nic_data->stats_dma_done == NULL) + return; /* no mac selected */ + + *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE; + nic_data->stats_pending = true; + wmb(); /* ensure done flag is clear */ + + /* Initiate DMA transfer of stats */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MAC_STAT_DMA_CMD, 1, + FRF_AB_MAC_STAT_DMA_ADR, + efx->stats_buffer.dma_addr); + efx_writeo(efx, ®, FR_AB_MAC_STAT_DMA); + + mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2)); + } + + static void falcon_stats_complete(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + + if (!nic_data->stats_pending) + return; + + nic_data->stats_pending = 0; + if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { + rmb(); /* read the done flag before the stats */ + efx->mac_op->update_stats(efx); + } else { + netif_err(efx, hw, efx->net_dev, + "timed out waiting for statistics\n"); + } + } + + static void falcon_stats_timer_func(unsigned long context) + { + struct efx_nic *efx = (struct efx_nic *)context; + struct falcon_nic_data *nic_data = efx->nic_data; + + spin_lock(&efx->stats_lock); + + falcon_stats_complete(efx); + if (nic_data->stats_disable_count == 0) + falcon_stats_request(efx); + + spin_unlock(&efx->stats_lock); + } + + static bool falcon_loopback_link_poll(struct efx_nic *efx) + { + struct efx_link_state old_state = efx->link_state; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + 
WARN_ON(!LOOPBACK_INTERNAL(efx)); + + efx->link_state.fd = true; + efx->link_state.fc = efx->wanted_fc; + efx->link_state.up = true; + efx->link_state.speed = 10000; + + return !efx_link_state_equal(&efx->link_state, &old_state); + } + + static int falcon_reconfigure_port(struct efx_nic *efx) + { + int rc; + + WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0); + + /* Poll the PHY link state *before* reconfiguring it. This means we + * will pick up the correct speed (in loopback) to select the correct + * MAC. + */ + if (LOOPBACK_INTERNAL(efx)) + falcon_loopback_link_poll(efx); + else + efx->phy_op->poll(efx); + + falcon_stop_nic_stats(efx); + falcon_deconfigure_mac_wrapper(efx); + + falcon_reset_macs(efx); + + efx->phy_op->reconfigure(efx); + rc = efx->mac_op->reconfigure(efx); + BUG_ON(rc); + + falcon_start_nic_stats(efx); + + /* Synchronise efx->link_state with the kernel */ + efx_link_status_changed(efx); + + return 0; + } + + /************************************************************************** + * + * PHY access via GMII + * + ************************************************************************** + */ + + /* Wait for GMII access to complete */ + static int falcon_gmii_wait(struct efx_nic *efx) + { + efx_oword_t md_stat; + int count; + + /* wait up to 50ms - taken max from datasheet */ + for (count = 0; count < 5000; count++) { + efx_reado(efx, &md_stat, FR_AB_MD_STAT); + if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { + if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || + EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { + netif_err(efx, hw, efx->net_dev, + "error from GMII access " + EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(md_stat)); + return -EIO; + } + return 0; + } + udelay(10); + } + netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); + return -ETIMEDOUT; + } + + /* Write an MDIO register of a PHY connected to Falcon. */ + static int falcon_mdio_write(struct net_device *net_dev, + int prtad, int devad, u16 addr, u16 value) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int rc; + + netif_vdbg(efx, hw, efx->net_dev, + "writing MDIO %d register %d.%d with 0x%04x\n", + prtad, devad, addr, value); + + mutex_lock(&nic_data->mdio_lock); + + /* Check MDIO not currently being accessed */ + rc = falcon_gmii_wait(efx); + if (rc) + goto out; + + /* Write the address/ID register */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); + efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); + + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, + FRF_AB_MD_DEV_ADR, devad); + efx_writeo(efx, ®, FR_AB_MD_ID); + + /* Write data */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value); + efx_writeo(efx, ®, FR_AB_MD_TXD); + + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_WRC, 1, + FRF_AB_MD_GC, 0); + efx_writeo(efx, ®, FR_AB_MD_CS); + + /* Wait for data to be written */ + rc = falcon_gmii_wait(efx); + if (rc) { + /* Abort the write operation */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_WRC, 0, + FRF_AB_MD_GC, 1); + efx_writeo(efx, ®, FR_AB_MD_CS); + udelay(10); + } + + out: + mutex_unlock(&nic_data->mdio_lock); + return rc; + } + + /* Read an MDIO register of a PHY connected to Falcon. 
*/ + static int falcon_mdio_read(struct net_device *net_dev, + int prtad, int devad, u16 addr) + { + struct efx_nic *efx = netdev_priv(net_dev); + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int rc; + + mutex_lock(&nic_data->mdio_lock); + + /* Check MDIO not currently being accessed */ + rc = falcon_gmii_wait(efx); + if (rc) + goto out; + + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); + efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); + + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, + FRF_AB_MD_DEV_ADR, devad); + efx_writeo(efx, ®, FR_AB_MD_ID); + + /* Request data to be read */ + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); + efx_writeo(efx, ®, FR_AB_MD_CS); + + /* Wait for data to become available */ + rc = falcon_gmii_wait(efx); + if (rc == 0) { + efx_reado(efx, ®, FR_AB_MD_RXD); + rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); + netif_vdbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got %04x\n", + prtad, devad, addr, rc); + } else { + /* Abort the read operation */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_RIC, 0, + FRF_AB_MD_GC, 1); + efx_writeo(efx, ®, FR_AB_MD_CS); + + netif_dbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got error %d\n", + prtad, devad, addr, rc); + } + + out: + mutex_unlock(&nic_data->mdio_lock); + return rc; + } + + /* This call is responsible for hooking in the MAC and PHY operations */ + static int falcon_probe_port(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + switch (efx->phy_type) { + case PHY_TYPE_SFX7101: + efx->phy_op = &falcon_sfx7101_phy_ops; + break; + case PHY_TYPE_QT2022C2: + case PHY_TYPE_QT2025C: + efx->phy_op = &falcon_qt202x_phy_ops; + break; + case PHY_TYPE_TXC43128: + efx->phy_op = &falcon_txc_phy_ops; + break; + default: + netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", + efx->phy_type); + return -ENODEV; + } + + /* Fill out MDIO structure and loopback modes */ + mutex_init(&nic_data->mdio_lock); + efx->mdio.mdio_read = falcon_mdio_read; + efx->mdio.mdio_write = falcon_mdio_write; + rc = efx->phy_op->probe(efx); + if (rc != 0) + return rc; + + /* Initial assumption */ + efx->link_state.speed = 10000; + efx->link_state.fd = true; + + /* Hardware flow ctrl. 
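falcon_probe_port() above publishes these accessors through struct mdio_if_info, the interface the generic clause-45 MDIO helpers consume. The sketch below shows a caller going through those hooks; the choice of the PMA/PMD control register is only an example, and foo_mdio_roundtrip() is not part of the driver.

#include <linux/mdio.h>
#include <linux/netdevice.h>

/* Read and write one clause-45 register through the driver-supplied
 * hooks; this is also how the generic mdio45 helpers reach the PHY. */
static int foo_mdio_roundtrip(struct mdio_if_info *mdio)
{
	int ctrl;

	ctrl = mdio->mdio_read(mdio->dev, mdio->prtad,
			       MDIO_MMD_PMAPMD, MDIO_CTRL1);
	if (ctrl < 0)
		return ctrl;		/* hooks return -errno on failure */

	/* Write the value back unchanged; a real caller would modify
	 * individual bits here. */
	return mdio->mdio_write(mdio->dev, mdio->prtad,
				MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl);
}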
FalconA RX FIFO too small for pause generation */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + else + efx->wanted_fc = EFX_FC_RX; + if (efx->mdio.mmds & MDIO_DEVS_AN) + efx->wanted_fc |= EFX_FC_AUTO; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + FALCON_MAC_STATS_SIZE); + if (rc) + return rc; + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); + nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset; + + return 0; + } + + static void falcon_remove_port(struct efx_nic *efx) + { + efx->phy_op->remove(efx); + efx_nic_free_buffer(efx, &efx->stats_buffer); + } + + /* Global events are basically PHY events */ + static bool + falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event) + { + struct efx_nic *efx = channel->efx; + struct falcon_nic_data *nic_data = efx->nic_data; + + if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || + EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || + EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) + /* Ignored */ + return true; + + if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) && + EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { + nic_data->xmac_poll_required = true; + return true; + } + + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? + EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : + EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen global RX_RESET event. Resetting.\n", + channel->channel); + + atomic_inc(&efx->rx_reset); + efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? + RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return true; + } + + return false; + } + + /************************************************************************** + * + * Falcon test code + * + **************************************************************************/ + + static int + falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) + { + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_nvconfig *nvconfig; + struct efx_spi_device *spi; + void *region; + int rc, magic_num, struct_ver; + __le16 *word, *limit; + u32 csum; + + if (efx_spi_present(&nic_data->spi_flash)) + spi = &nic_data->spi_flash; + else if (efx_spi_present(&nic_data->spi_eeprom)) + spi = &nic_data->spi_eeprom; + else + return -EINVAL; + + region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); + if (!region) + return -ENOMEM; + nvconfig = region + FALCON_NVCONFIG_OFFSET; + + mutex_lock(&nic_data->spi_lock); + rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); + mutex_unlock(&nic_data->spi_lock); + if (rc) { + netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", + efx_spi_present(&nic_data->spi_flash) ? 
+ "flash" : "EEPROM"); + rc = -EIO; + goto out; + } + + magic_num = le16_to_cpu(nvconfig->board_magic_num); + struct_ver = le16_to_cpu(nvconfig->board_struct_ver); + + rc = -EINVAL; + if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { + netif_err(efx, hw, efx->net_dev, + "NVRAM bad magic 0x%x\n", magic_num); + goto out; + } + if (struct_ver < 2) { + netif_err(efx, hw, efx->net_dev, + "NVRAM has ancient version 0x%x\n", struct_ver); + goto out; + } else if (struct_ver < 4) { + word = &nvconfig->board_magic_num; + limit = (__le16 *) (nvconfig + 1); + } else { + word = region; + limit = region + FALCON_NVCONFIG_END; + } + for (csum = 0; word < limit; ++word) + csum += le16_to_cpu(*word); + + if (~csum & 0xffff) { + netif_err(efx, hw, efx->net_dev, + "NVRAM has incorrect checksum\n"); + goto out; + } + + rc = 0; + if (nvconfig_out) + memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig)); + + out: + kfree(region); + return rc; + } + + static int falcon_test_nvram(struct efx_nic *efx) + { + return falcon_read_nvram(efx, NULL); + } + + static const struct efx_nic_register_test falcon_b0_register_tests[] = { + { FR_AZ_ADR_REGION, + EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, + { FR_AZ_RX_CFG, + EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, + { FR_AZ_TX_CFG, + EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_TX_RESERVED, + EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, + { FR_AB_MAC_CTRL, + EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_SRM_TX_DC_CFG, + EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_CFG, + EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_PF_WM, + EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_DP_CTRL, + EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_GM_CFG2, + EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_GMF_CFG0, + EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_GLB_CFG, + EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_TX_CFG, + EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_RX_CFG, + EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_RX_PARAM, + EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_FC, + EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_ADR_LO, + EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XX_SD_CTL, + EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, + }; + + static int falcon_b0_test_registers(struct efx_nic *efx) + { + return efx_nic_test_registers(efx, falcon_b0_register_tests, + ARRAY_SIZE(falcon_b0_register_tests)); + } + + /************************************************************************** + * + * Device reset + * + ************************************************************************** + */ + + static enum reset_type falcon_map_reset_reason(enum reset_type reason) + { + switch (reason) { + case RESET_TYPE_RX_RECOVERY: + case RESET_TYPE_RX_DESC_FETCH: + case RESET_TYPE_TX_DESC_FETCH: + case RESET_TYPE_TX_SKIP: + /* These can occasionally occur due to hardware bugs. + * We try to reset without disrupting the link. 
+ */ + return RESET_TYPE_INVISIBLE; + default: + return RESET_TYPE_ALL; + } + } + + static int falcon_map_reset_flags(u32 *flags) + { + enum { + FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC), + FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY, + FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ, + }; + + if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) { + *flags &= ~FALCON_RESET_WORLD; + return RESET_TYPE_WORLD; + } + + if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) { + *flags &= ~FALCON_RESET_ALL; + return RESET_TYPE_ALL; + } + + if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) { + *flags &= ~FALCON_RESET_INVISIBLE; + return RESET_TYPE_INVISIBLE; + } + + return -EINVAL; + } + + /* Resets NIC to known state. This routine must be called in process + * context and is allowed to sleep. */ + static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) + { + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t glb_ctl_reg_ker; + int rc; + + netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", + RESET_TYPE(method)); + + /* Initiate device reset */ + if (method == RESET_TYPE_WORLD) { + rc = pci_save_state(efx->pci_dev); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of primary " + "function prior to hardware reset\n"); + goto fail1; + } + if (efx_nic_is_dual_func(efx)) { + rc = pci_save_state(nic_data->pci_dev2); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of " + "secondary function prior to " + "hardware reset\n"); + goto fail2; + } + } + + EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, + FRF_AB_EXT_PHY_RST_DUR, + FFE_AB_EXT_PHY_RST_DUR_10240US, + FRF_AB_SWRST, 1); + } else { + EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, + /* exclude PHY from "invisible" reset */ + FRF_AB_EXT_PHY_RST_CTL, + method == RESET_TYPE_INVISIBLE, + /* exclude EEPROM/flash and PCIe */ + FRF_AB_PCIE_CORE_RST_CTL, 1, + FRF_AB_PCIE_NSTKY_RST_CTL, 1, + FRF_AB_PCIE_SD_RST_CTL, 1, + FRF_AB_EE_RST_CTL, 1, + FRF_AB_EXT_PHY_RST_DUR, + FFE_AB_EXT_PHY_RST_DUR_10240US, + FRF_AB_SWRST, 1); + } + efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); + + netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); + schedule_timeout_uninterruptible(HZ / 20); + + /* Restore PCI configuration if needed */ + if (method == RESET_TYPE_WORLD) { + if (efx_nic_is_dual_func(efx)) + pci_restore_state(nic_data->pci_dev2); + pci_restore_state(efx->pci_dev); + netif_dbg(efx, drv, efx->net_dev, + "successfully restored PCI config\n"); + } + + /* Assert that reset complete */ + efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); + if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { + rc = -ETIMEDOUT; + netif_err(efx, hw, efx->net_dev, + "timed out waiting for hardware reset\n"); + goto fail3; + } + netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); + + return 0; + + /* pci_save_state() and pci_restore_state() MUST be called in pairs */ + fail2: + pci_restore_state(efx->pci_dev); + fail1: + fail3: + return rc; + } + + static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) + { + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + mutex_lock(&nic_data->spi_lock); + rc = __falcon_reset_hw(efx, method); + mutex_unlock(&nic_data->spi_lock); + + return rc; + } + + static void falcon_monitor(struct efx_nic *efx) + { + bool link_changed; + int rc; + + BUG_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = 
falcon_board(efx)->type->monitor(efx); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "Board sensor %s; shutting down PHY\n", + (rc == -ERANGE) ? "reported fault" : "failed"); + efx->phy_mode |= PHY_MODE_LOW_POWER; + rc = __efx_reconfigure_port(efx); + WARN_ON(rc); + } + + if (LOOPBACK_INTERNAL(efx)) + link_changed = falcon_loopback_link_poll(efx); + else + link_changed = efx->phy_op->poll(efx); + + if (link_changed) { + falcon_stop_nic_stats(efx); + falcon_deconfigure_mac_wrapper(efx); + + falcon_reset_macs(efx); + rc = efx->mac_op->reconfigure(efx); + BUG_ON(rc); + + falcon_start_nic_stats(efx); + + efx_link_status_changed(efx); + } + + falcon_poll_xmac(efx); + } + + /* Zeroes out the SRAM contents. This routine must be called in + * process context and is allowed to sleep. + */ + static int falcon_reset_sram(struct efx_nic *efx) + { + efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; + int count; + + /* Set the SRAM wake/sleep GPIO appropriately. */ + efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); + EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); + efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); + + /* Initiate SRAM reset */ + EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, + FRF_AZ_SRM_INIT_EN, 1, + FRF_AZ_SRM_NB_SZ, 0); + efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); + + /* Wait for SRAM reset to complete */ + count = 0; + do { + netif_dbg(efx, hw, efx->net_dev, + "waiting for SRAM reset (attempt %d)...\n", count); + + /* SRAM reset is slow; expect around 16ms */ + schedule_timeout_uninterruptible(HZ / 50); + + /* Check for reset complete */ + efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); + if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "SRAM reset complete\n"); + + return 0; + } + } while (++count < 20); /* wait up to 0.4 sec */ + + netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); + return -ETIMEDOUT; + } + + static void falcon_spi_device_init(struct efx_nic *efx, + struct efx_spi_device *spi_device, + unsigned int device_id, u32 device_type) + { + if (device_type != 0) { + spi_device->device_id = device_id; + spi_device->size = + 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); + spi_device->addr_len = + SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); + spi_device->munge_address = (spi_device->size == 1 << 9 && + spi_device->addr_len == 1); + spi_device->erase_command = + SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD); + spi_device->erase_size = + 1 << SPI_DEV_TYPE_FIELD(device_type, + SPI_DEV_TYPE_ERASE_SIZE); + spi_device->block_size = + 1 << SPI_DEV_TYPE_FIELD(device_type, + SPI_DEV_TYPE_BLOCK_SIZE); + } else { + spi_device->size = 0; + } + } + + /* Extract non-volatile configuration */ + static int falcon_probe_nvconfig(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_nvconfig *nvconfig; + int rc; + + nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); + if (!nvconfig) + return -ENOMEM; + + rc = falcon_read_nvram(efx, nvconfig); + if (rc) + goto out; + + efx->phy_type = nvconfig->board_v2.port0_phy_type; + efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr; + + if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { + falcon_spi_device_init( + efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH, + le32_to_cpu(nvconfig->board_v3 + .spi_device_type[FFE_AB_SPI_DEVICE_FLASH])); + falcon_spi_device_init( + efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, + 
le32_to_cpu(nvconfig->board_v3 + .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM])); + } + + /* Read the MAC addresses */ + memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN); + + netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", + efx->phy_type, efx->mdio.prtad); + + rc = falcon_probe_board(efx, + le16_to_cpu(nvconfig->board_v2.board_revision)); + out: + kfree(nvconfig); + return rc; + } + + /* Probe all SPI devices on the NIC */ + static void falcon_probe_spi_devices(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; + int boot_dev; + + efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL); + efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); + efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); + + if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { + boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? + FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); + netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", + boot_dev == FFE_AB_SPI_DEVICE_FLASH ? + "flash" : "EEPROM"); + } else { + /* Disable VPD and set clock dividers to safe + * values for initial programming. */ + boot_dev = -1; + netif_dbg(efx, probe, efx->net_dev, + "Booted from internal ASIC settings;" + " setting SPI config\n"); + EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, + /* 125 MHz / 7 ~= 20 MHz */ + FRF_AB_EE_SF_CLOCK_DIV, 7, + /* 125 MHz / 63 ~= 2 MHz */ + FRF_AB_EE_EE_CLOCK_DIV, 63); + efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); + } + + mutex_init(&nic_data->spi_lock); + + if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) + falcon_spi_device_init(efx, &nic_data->spi_flash, + FFE_AB_SPI_DEVICE_FLASH, + default_flash_type); + if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) + falcon_spi_device_init(efx, &nic_data->spi_eeprom, + FFE_AB_SPI_DEVICE_EEPROM, + large_eeprom_type); + } + + static int falcon_probe_nic(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data; + struct falcon_board *board; + int rc; + + /* Allocate storage for hardware specific data */ + nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + efx->nic_data = nic_data; + + rc = -ENODEV; + + if (efx_nic_fpga_ver(efx) != 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon FPGA not supported\n"); + goto fail1; + } + + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { + efx_oword_t nic_stat; + struct pci_dev *dev; + u8 pci_rev = efx->pci_dev->revision; + + if ((pci_rev == 0xff) || (pci_rev == 0)) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A0 not supported\n"); + goto fail1; + } + efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); + if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 1G not supported\n"); + goto fail1; + } + if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 PCI-X not supported\n"); + goto fail1; + } + + dev = pci_dev_get(efx->pci_dev); - while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, ++ while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE, ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, + dev))) { + if (dev->bus == efx->pci_dev->bus && + dev->devfn == efx->pci_dev->devfn + 1) { + nic_data->pci_dev2 = dev; + break; + } + } + if (!nic_data->pci_dev2) { + netif_err(efx, probe, efx->net_dev, + "failed to find secondary function\n"); + rc = -ENODEV; + goto fail2; + } + } + + /* Now we can reset the NIC */ + rc = __falcon_reset_hw(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, probe, 
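Falcon A1 boards expose a second PCI function, and the loop above locates it with pci_get_device(), which drops the reference on its "from" cursor and returns the next match with a reference held. The stand-alone sketch below restates that search; foo_find_partner() and its parameters are illustrative only, and the caller still owns the returned reference.

#include <linux/pci.h>

/* Illustrative restatement of the search above: return the function at
 * devfn + 1 on the same bus with the given IDs, holding a reference
 * that the caller must drop with pci_dev_put(), or NULL if not found. */
static struct pci_dev *foo_find_partner(struct pci_dev *primary,
					unsigned int vendor,
					unsigned int device)
{
	struct pci_dev *dev;

	/* pci_get_device() drops the reference on its "from" cursor, so
	 * take an extra one before starting the walk at "primary". */
	dev = pci_dev_get(primary);
	while ((dev = pci_get_device(vendor, device, dev))) {
		if (dev->bus == primary->bus &&
		    dev->devfn == primary->devfn + 1)
			return dev;	/* reference from pci_get_device() */
	}
	return NULL;
}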
efx->net_dev, "failed to reset NIC\n"); + goto fail3; + } + + /* Allocate memory for INT_KER */ + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); + if (rc) + goto fail4; + BUG_ON(efx->irq_status.dma_addr & 0x0f); + + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (u64)efx->irq_status.dma_addr, + efx->irq_status.addr, + (u64)virt_to_phys(efx->irq_status.addr)); + + falcon_probe_spi_devices(efx); + + /* Read in the non-volatile configuration */ + rc = falcon_probe_nvconfig(efx); + if (rc) { + if (rc == -EINVAL) + netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n"); + goto fail5; + } + + /* Initialise I2C adapter */ + board = falcon_board(efx); + board->i2c_adap.owner = THIS_MODULE; + board->i2c_data = falcon_i2c_bit_operations; + board->i2c_data.data = efx; + board->i2c_adap.algo_data = &board->i2c_data; + board->i2c_adap.dev.parent = &efx->pci_dev->dev; + strlcpy(board->i2c_adap.name, "SFC4000 GPIO", + sizeof(board->i2c_adap.name)); + rc = i2c_bit_add_bus(&board->i2c_adap); + if (rc) + goto fail5; + + rc = falcon_board(efx)->type->init(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise board\n"); + goto fail6; + } + + nic_data->stats_disable_count = 1; + setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, + (unsigned long)efx); + + return 0; + + fail6: + BUG_ON(i2c_del_adapter(&board->i2c_adap)); + memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); + fail5: + efx_nic_free_buffer(efx, &efx->irq_status); + fail4: + fail3: + if (nic_data->pci_dev2) { + pci_dev_put(nic_data->pci_dev2); + nic_data->pci_dev2 = NULL; + } + fail2: + fail1: + kfree(efx->nic_data); + return rc; + } + + static void falcon_init_rx_cfg(struct efx_nic *efx) + { + /* Prior to Siena the RX DMA engine will split each frame at + * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to + * be so large that that never happens. */ + const unsigned huge_buf_size = (3 * 4096) >> 5; + /* RX control FIFO thresholds (32 entries) */ + const unsigned ctrl_xon_thr = 20; + const unsigned ctrl_xoff_thr = 25; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AZ_RX_CFG); + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { + /* Data FIFO size is 5.5K */ + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, + huge_buf_size); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); + } else { + /* Data FIFO size is 80K; register fields moved */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, + huge_buf_size); + /* Send XON and XOFF at ~3 * max MTU away from empty/full */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); + + /* Enable hash insertion. This is broken for the + * 'Falcon' hash so also select Toeplitz TCP/IPv4 and + * IPv4 hashes. */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1); + } + /* Always enable XOFF signal from RX FIFO. 
We enable + * or disable transmission of pause frames at the MAC. */ + EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); + efx_writeo(efx, ®, FR_AZ_RX_CFG); + } + + /* This call performs hardware-specific global initialisation, such as + * defining the descriptor cache sizes and number of RSS channels. + * It does not set up any buffers, descriptor rings or event queues. + */ + static int falcon_init_nic(struct efx_nic *efx) + { + efx_oword_t temp; + int rc; + + /* Use on-chip SRAM */ + efx_reado(efx, &temp, FR_AB_NIC_STAT); + EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); + efx_writeo(efx, &temp, FR_AB_NIC_STAT); + + rc = falcon_reset_sram(efx); + if (rc) + return rc; + + /* Clear the parity enables on the TX data fifos as + * they produce false parity errors because of timing issues + */ + if (EFX_WORKAROUND_5129(efx)) { + efx_reado(efx, &temp, FR_AZ_CSR_SPARE); + EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0); + efx_writeo(efx, &temp, FR_AZ_CSR_SPARE); + } + + if (EFX_WORKAROUND_7244(efx)) { + efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8); + efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL); + } + + /* XXX This is documented only for Falcon A0/A1 */ + /* Setup RX. Wait for descriptor is broken and must + * be disabled. RXDP recovery shouldn't be needed, but is. + */ + efx_reado(efx, &temp, FR_AA_RX_SELF_RST); + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1); + if (EFX_WORKAROUND_5583(efx)) + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); + efx_writeo(efx, &temp, FR_AA_RX_SELF_RST); + + /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). 
+ */ + efx_reado(efx, &temp, FR_AZ_TX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + efx_writeo(efx, &temp, FR_AZ_TX_CFG); + + falcon_init_rx_cfg(efx); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Set destination of both TX and RX Flush events */ + EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); + efx_writeo(efx, &temp, FR_BZ_DP_CTRL); + } + + efx_nic_init_common(efx); + + return 0; + } + + static void falcon_remove_nic(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_board *board = falcon_board(efx); + int rc; + + board->type->fini(efx); + + /* Remove I2C adapter and clear it in preparation for a retry */ + rc = i2c_del_adapter(&board->i2c_adap); + BUG_ON(rc); + memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); + + efx_nic_free_buffer(efx, &efx->irq_status); + + __falcon_reset_hw(efx, RESET_TYPE_ALL); + + /* Release the second function after the reset */ + if (nic_data->pci_dev2) { + pci_dev_put(nic_data->pci_dev2); + nic_data->pci_dev2 = NULL; + } + + /* Tear down the private nic state */ + kfree(efx->nic_data); + efx->nic_data = NULL; + } + + static void falcon_update_nic_stats(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t cnt; + + if (nic_data->stats_disable_count) + return; + + efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); + efx->n_rx_nodesc_drop_cnt += + EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); + + if (nic_data->stats_pending && + *nic_data->stats_dma_done == FALCON_STATS_DONE) { + nic_data->stats_pending = false; + rmb(); /* read the done flag before the stats */ + efx->mac_op->update_stats(efx); + } + } + + void falcon_start_nic_stats(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + + spin_lock_bh(&efx->stats_lock); + if (--nic_data->stats_disable_count == 0) + falcon_stats_request(efx); + spin_unlock_bh(&efx->stats_lock); + } + + void falcon_stop_nic_stats(struct efx_nic *efx) + { + struct falcon_nic_data *nic_data = efx->nic_data; + int i; + + might_sleep(); + + spin_lock_bh(&efx->stats_lock); + ++nic_data->stats_disable_count; + spin_unlock_bh(&efx->stats_lock); + + del_timer_sync(&nic_data->stats_timer); + + /* Wait enough time for the most recent transfer to + * complete. 
*/ + for (i = 0; i < 4 && nic_data->stats_pending; i++) { + if (*nic_data->stats_dma_done == FALCON_STATS_DONE) + break; + msleep(1); + } + + spin_lock_bh(&efx->stats_lock); + falcon_stats_complete(efx); + spin_unlock_bh(&efx->stats_lock); + } + + static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) + { + falcon_board(efx)->type->set_id_led(efx, mode); + } + + /************************************************************************** + * + * Wake on LAN + * + ************************************************************************** + */ + + static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) + { + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + } + + static int falcon_set_wol(struct efx_nic *efx, u32 type) + { + if (type != 0) + return -EINVAL; + return 0; + } + + /************************************************************************** + * + * Revision-dependent attributes used by efx.c and nic.c + * + ************************************************************************** + */ + + const struct efx_nic_type falcon_a1_nic_type = { + .probe = falcon_probe_nic, + .remove = falcon_remove_nic, + .init = falcon_init_nic, + .fini = efx_port_dummy_op_void, + .monitor = falcon_monitor, + .map_reset_reason = falcon_map_reset_reason, + .map_reset_flags = falcon_map_reset_flags, + .reset = falcon_reset_hw, + .probe_port = falcon_probe_port, + .remove_port = falcon_remove_port, + .handle_global_event = falcon_handle_global_event, + .prepare_flush = falcon_prepare_flush, + .update_stats = falcon_update_nic_stats, + .start_stats = falcon_start_nic_stats, + .stop_stats = falcon_stop_nic_stats, + .set_id_led = falcon_set_id_led, + .push_irq_moderation = falcon_push_irq_moderation, + .push_multicast_hash = falcon_push_multicast_hash, + .reconfigure_port = falcon_reconfigure_port, + .get_wol = falcon_get_wol, + .set_wol = falcon_set_wol, + .resume_wol = efx_port_dummy_op_void, + .test_nvram = falcon_test_nvram, + .default_mac_ops = &falcon_xmac_operations, + + .revision = EFX_REV_FALCON_A1, + .mem_map_size = 0x20000, + .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, + .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, + .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, + .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, + .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_buffer_padding = 0x24, + .max_interrupt_mode = EFX_INT_MODE_MSI, + .phys_addr_channels = 4, + .tx_dc_base = 0x130000, + .rx_dc_base = 0x100000, + .offload_features = NETIF_F_IP_CSUM, + }; + + const struct efx_nic_type falcon_b0_nic_type = { + .probe = falcon_probe_nic, + .remove = falcon_remove_nic, + .init = falcon_init_nic, + .fini = efx_port_dummy_op_void, + .monitor = falcon_monitor, + .map_reset_reason = falcon_map_reset_reason, + .map_reset_flags = falcon_map_reset_flags, + .reset = falcon_reset_hw, + .probe_port = falcon_probe_port, + .remove_port = falcon_remove_port, + .handle_global_event = falcon_handle_global_event, + .prepare_flush = falcon_prepare_flush, + .update_stats = falcon_update_nic_stats, + .start_stats = falcon_start_nic_stats, + .stop_stats = falcon_stop_nic_stats, + .set_id_led = falcon_set_id_led, + .push_irq_moderation = falcon_push_irq_moderation, + .push_multicast_hash = falcon_push_multicast_hash, + .reconfigure_port = falcon_reconfigure_port, + .get_wol = falcon_get_wol, + .set_wol = falcon_set_wol, + .resume_wol = efx_port_dummy_op_void, + .test_registers = 
falcon_b0_test_registers, + .test_nvram = falcon_test_nvram, + .default_mac_ops = &falcon_xmac_operations, + + .revision = EFX_REV_FALCON_B0, + /* Map everything up to and including the RSS indirection + * table. Don't map MSI-X table, MSI-X PBA since Linux + * requires that they not be mapped. */ + .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * + FR_BZ_RX_INDIRECTION_TBL_ROWS), + .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, + .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, + .buf_tbl_base = FR_BZ_BUF_FULL_TBL, + .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, + .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_buffer_hash_size = 0x10, + .rx_buffer_padding = 0, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy + * interrupt handler only supports 32 + * channels */ + .tx_dc_base = 0x130000, + .rx_dc_base = 0x100000, + .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, + }; + diff --combined drivers/net/ethernet/sfc/falcon_boards.c index 6cc16b8,b9cc846..6cc16b8 --- a/drivers/net/ethernet/sfc/falcon_boards.c +++ b/drivers/net/ethernet/sfc/falcon_boards.c @@@ -764,8 -764,7 +764,8 @@@ int falcon_probe_board(struct efx_nic *
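The falcon_a1_nic_type/falcon_b0_nic_type tables above are the sfc driver's per-revision dispatch mechanism: every revision-specific hook and constant lives in one const table, and the generic code calls through the table instead of branching on the revision. A minimal stand-alone sketch of that pattern, assuming nothing beyond standard C; the struct and function names below are illustrative only, not the driver's real definitions.

/* illustrative sketch of a per-revision const ops table, not driver code */
#include <stdio.h>

struct demo_nic;

/* one const table per hardware revision: hooks plus constants */
struct demo_nic_type {
	const char *name;
	int (*init)(struct demo_nic *nic);
	unsigned int mem_map_size;
};

struct demo_nic {
	const struct demo_nic_type *type;	/* chosen once at probe time */
};

static int demo_rev_a_init(struct demo_nic *nic)
{
	printf("init %s, mapping 0x%x bytes\n",
	       nic->type->name, nic->type->mem_map_size);
	return 0;
}

static const struct demo_nic_type demo_rev_a = {
	.name		= "rev A",
	.init		= demo_rev_a_init,
	.mem_map_size	= 0x20000,
};

int main(void)
{
	struct demo_nic nic = { .type = &demo_rev_a };

	/* generic code never tests the revision, it only calls the hook */
	return nic.type->init(&nic);
}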
if (board->type) { netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", - (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) + (efx->pci_dev->subsystem_vendor == + PCI_VENDOR_ID_SOLARFLARE) ? board->type->ref_model : board->type->gen_type, 'A' + board->major, board->minor); return 0; diff --combined drivers/net/ethernet/smsc/smsc911x.c index 0000000,a3aa4c0..d2be42a mode 000000,100644..100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@@ -1,0 -1,2406 +1,2408 @@@ + /*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + *************************************************************************** + * Rewritten, heavily based on smsc911x simple driver by SMSC. + * Partly uses io macros from smc91x.c by Nicolas Pitre + * + * Supported devices: + * LAN9115, LAN9116, LAN9117, LAN9118 + * LAN9215, LAN9216, LAN9217, LAN9218 + * LAN9210, LAN9211 + * LAN9220, LAN9221 ++ * LAN89218 + * + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/crc32.h> + #include <linux/delay.h> + #include <linux/errno.h> + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/init.h> + #include <linux/interrupt.h> + #include <linux/ioport.h> + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/netdevice.h> + #include <linux/platform_device.h> + #include <linux/sched.h> + #include <linux/timer.h> + #include <linux/bug.h> + #include <linux/bitops.h> + #include <linux/irq.h> + #include <linux/io.h> + #include <linux/swab.h> + #include <linux/phy.h> + #include <linux/smsc911x.h> + #include <linux/device.h> + #include <linux/of.h> + #include <linux/of_device.h> + #include <linux/of_gpio.h> + #include <linux/of_net.h> + #include "smsc911x.h" + + #define SMSC_CHIPNAME "smsc911x" + #define SMSC_MDIONAME "smsc911x-mdio" + #define SMSC_DRV_VERSION "2008-10-21" + + MODULE_LICENSE("GPL"); + MODULE_VERSION(SMSC_DRV_VERSION); + MODULE_ALIAS("platform:smsc911x"); + + #if USE_DEBUG > 0 + static int debug = 16; + #else + static int debug = 3; + #endif + + module_param(debug, int, 0); + MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + + struct smsc911x_data; + + struct smsc911x_ops { + u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg); + void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val); + void (*rx_readfifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); + void (*tx_writefifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); + }; + + struct smsc911x_data { + void __iomem *ioaddr; + + unsigned int idrev; + + /* used to decide which workarounds apply */ + unsigned int generation; + + /* device configuration (copied from 
platform_data during probe) */ + struct smsc911x_platform_config config; + + /* This needs to be acquired before calling any of below: + * smsc911x_mac_read(), smsc911x_mac_write() + */ + spinlock_t mac_lock; + + /* spinlock to ensure register accesses are serialised */ + spinlock_t dev_lock; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + unsigned int using_extphy; + int last_duplex; + int last_carrier; + + u32 msg_enable; + unsigned int gpio_setting; + unsigned int gpio_orig_setting; + struct net_device *dev; + struct napi_struct napi; + + unsigned int software_irq_signal; + + #ifdef USE_PHY_WORK_AROUND + #define MIN_PACKET_SIZE (64) + char loopback_tx_pkt[MIN_PACKET_SIZE]; + char loopback_rx_pkt[MIN_PACKET_SIZE]; + unsigned int resetcount; + #endif + + /* Members for Multicast filter workaround */ + unsigned int multicast_update_pending; + unsigned int set_bits_mask; + unsigned int clear_bits_mask; + unsigned int hashhi; + unsigned int hashlo; + + /* register access functions */ + const struct smsc911x_ops *ops; + }; + + /* Easy access to information */ + #define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift)) + + static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) + { + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + reg); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | + ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); + + BUG(); + return 0; + } + + static inline u32 + __smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg) + { + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + __smsc_shift(pdata, reg)); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return (readw(pdata->ioaddr + + __smsc_shift(pdata, reg)) & 0xFFFF) | + ((readw(pdata->ioaddr + + __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16); + + BUG(); + return 0; + } + + static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) + { + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = pdata->ops->reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; + } + + static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) + { + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + reg); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, pdata->ioaddr + reg); + writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); + return; + } + + BUG(); + } + + static inline void + __smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val) + { + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + __smsc_shift(pdata, reg)); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg)); + writew((val >> 16) & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg + 2)); + return; + } + + BUG(); + } + + static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) + { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + pdata->ops->reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + } + + /* Writes a packet to the TX_DATA_FIFO */ + static inline void + smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) + { + unsigned long flags; + + 
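The __smsc911x_reg_read()/__smsc911x_reg_write() helpers above handle parts wired to a 16-bit bus by issuing two half-word accesses per 32-bit register: the low 16 bits at the register offset and the high 16 bits at offset + 2. A small host-side sketch of the same combine/split arithmetic, using a plain array in place of MMIO, purely illustrative.

/* illustrative sketch of 32-bit access over a 16-bit bus, not driver code */
#include <assert.h>
#include <stdint.h>

static uint16_t bus[4];	/* pretend bus: one 16-bit word per 2-byte offset */

static uint32_t read32_as_two_16(unsigned int reg)
{
	return (uint32_t)bus[reg / 2] | ((uint32_t)bus[reg / 2 + 1] << 16);
}

static void write32_as_two_16(unsigned int reg, uint32_t val)
{
	bus[reg / 2] = (uint16_t)(val & 0xFFFF);	/* low half at reg */
	bus[reg / 2 + 1] = (uint16_t)(val >> 16);	/* high half at reg + 2 */
}

int main(void)
{
	write32_as_two_16(0, 0x87654321);
	assert(read32_as_two_16(0) == 0x87654321);
	return 0;
}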
spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); + out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); + } + + /* Writes a packet to the TX_DATA_FIFO - shifted version */ + static inline void + smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) + { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + __smsc_shift(pdata, + TX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, + TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); + out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); + } + + /* Reads a packet out of the RX_DATA_FIFO */ + static inline void + smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) + { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); + goto out; + } + + BUG(); + out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); + } + + /* Reads a packet out of the RX_DATA_FIFO - shifted version */ + static inline void + smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) + { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + __smsc_shift(pdata, + RX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO); + goto out; + } + + BUG(); + out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); + } + + /* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read + * and smsc911x_mac_write, so assumes mac_lock is held */ + static int smsc911x_mac_complete(struct smsc911x_data *pdata) + { + int i; + u32 val; + + SMSC_ASSERT_MAC_LOCK(pdata); + + for (i = 0; i < 40; i++) { + val = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (!(val & MAC_CSR_CMD_CSR_BUSY_)) + return 0; + } + SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. " + "MAC_CSR_CMD: 0x%08X", val); + return -EIO; + } + + /* Fetches a MAC register value. 
Assumes mac_lock is acquired */ + static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset) + { + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, "MAC busy at entry"); + return 0xFFFFFFFF; + } + + /* Send the MAC cmd */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the read to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return smsc911x_reg_read(pdata, MAC_CSR_DATA); + + SMSC_WARN(pdata, hw, "MAC busy after read"); + return 0xFFFFFFFF; + } + + /* Set a mac register, mac_lock must be acquired before calling */ + static void smsc911x_mac_write(struct smsc911x_data *pdata, + unsigned int offset, u32 val) + { + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, + "smsc911x_mac_write failed, MAC busy at entry"); + return; + } + + /* Send data to write */ + smsc911x_reg_write(pdata, MAC_CSR_DATA, val); + + /* Write the actual data */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the write to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return; + + SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write"); + } + + /* Get a phy register */ + static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) + { + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???"); + reg = -EIO; + goto out; + } + + /* Set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6); + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for read to complete w/ timeout */ + for (i = 0; i < 100; i++) + if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = smsc911x_mac_read(pdata, MII_DATA); + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish"); + reg = -EIO; + + out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; + } + + /* Set a phy register */ + static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) + { + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???"); + reg = -EIO; + goto out; + } + + /* Put the data to write in the MAC */ + smsc911x_mac_write(pdata, MII_DATA, val); + + /* Set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACC_MII_WRITE_; + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for write to complete w/ timeout */ + for (i = 0; i < 100; i++) + if 
(!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = 0; + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish"); + reg = -EIO; + + out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; + } + + /* Switch to external phy. Assumes tx and rx are stopped. */ + static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) + { + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + /* Disable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to stop */ + + /* Switch to external phy */ + hwcfg |= HW_CFG_EXT_PHY_EN_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + + /* Enable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to restart */ + + hwcfg |= HW_CFG_SMI_SEL_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + } + + /* Autodetects and enables external phy if present on supported chips. + * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY + * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */ + static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) + { + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing internal PHY"); + pdata->using_extphy = 0; + } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET set, using external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET clear, using internal PHY"); + pdata->using_extphy = 0; + } + } + + /* Fetches a tx status out of the status fifo */ + static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata) + { + unsigned int result = + smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, TX_STATUS_FIFO); + + return result; + } + + /* Fetches the next rx status */ + static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata) + { + unsigned int result = + smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, RX_STATUS_FIFO); + + return result; + } + + #ifdef USE_PHY_WORK_AROUND + static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) + { + unsigned int tries; + u32 wrsz; + u32 rdsz; + ulong bufp; + + for (tries = 0; tries < 10; tries++) { + unsigned int txcmd_a; + unsigned int txcmd_b; + unsigned int status; + unsigned int pktlength; + unsigned int i; + + /* Zero-out rx packet memory */ + memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE); + + /* Write tx packet to 118 */ + txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16; + txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + txcmd_a |= MIN_PACKET_SIZE; + + txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b); + + bufp = (ulong)pdata->loopback_tx_pkt & (~0x3); + wrsz = MIN_PACKET_SIZE + 3; + wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); + wrsz 
>>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + + /* Wait till transmit is done */ + i = 60; + do { + udelay(5); + status = smsc911x_tx_get_txstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to transmit during loopback test"); + continue; + } + if (status & TX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Transmit encountered errors during loopback test"); + continue; + } + + /* Wait till receive is done */ + i = 60; + do { + udelay(5); + status = smsc911x_rx_get_rxstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to receive during loopback test"); + continue; + } + if (status & RX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Receive encountered errors during loopback test"); + continue; + } + + pktlength = ((status & 0x3FFF0000UL) >> 16); + bufp = (ulong)pdata->loopback_rx_pkt; + rdsz = pktlength + 3; + rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); + rdsz >>= 2; + + pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz); + + if (pktlength != (MIN_PACKET_SIZE + 4)) { + SMSC_WARN(pdata, hw, "Unexpected packet size " + "during loop back test, size=%d, will retry", + pktlength); + } else { + unsigned int j; + int mismatch = 0; + for (j = 0; j < MIN_PACKET_SIZE; j++) { + if (pdata->loopback_tx_pkt[j] + != pdata->loopback_rx_pkt[j]) { + mismatch = 1; + break; + } + } + if (!mismatch) { + SMSC_TRACE(pdata, hw, "Successfully verified " + "loopback packet"); + return 0; + } else { + SMSC_WARN(pdata, hw, "Data mismatch " + "during loop back test, will retry"); + } + } + } + + return -EIO; + } + + static int smsc911x_phy_reset(struct smsc911x_data *pdata) + { + struct phy_device *phy_dev = pdata->phy_dev; + unsigned int temp; + unsigned int i = 100000; + + BUG_ON(!phy_dev); + BUG_ON(!phy_dev->bus); + + SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); + do { + msleep(1); + temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, + MII_BMCR); + } while ((i--) && (temp & BMCR_RESET)); + + if (temp & BMCR_RESET) { + SMSC_WARN(pdata, hw, "PHY reset failed to complete"); + return -EIO; + } + /* Extra delay required because the phy may not be completed with + * its reset when BMCR_RESET is cleared. 
Specs say 256 uS is + * enough delay but using 1ms here to be safe */ + msleep(1); + + return 0; + } + + static int smsc911x_phy_loopbacktest(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + int result = -EIO; + unsigned int i, val; + unsigned long flags; + + /* Initialise tx packet using broadcast destination address */ + memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + + /* Use incrementing source address */ + for (i = 6; i < 12; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + /* Set length type field */ + pdata->loopback_tx_pkt[12] = 0x00; + pdata->loopback_tx_pkt[13] = 0x00; + + for (i = 14; i < MIN_PACKET_SIZE; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + val = smsc911x_reg_read(pdata, HW_CFG); + val &= HW_CFG_TX_FIF_SZ_; + val |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, val); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + smsc911x_reg_write(pdata, RX_CFG, + (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8); + + for (i = 0; i < 10; i++) { + /* Set PHY to 10/FD, no ANEG, and loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, + BMCR_LOOPBACK | BMCR_FULLDPLX); + + /* Enable MAC tx/rx, FD */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_ + | MAC_CR_TXEN_ | MAC_CR_RXEN_); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + if (smsc911x_phy_check_loopbackpkt(pdata) == 0) { + result = 0; + break; + } + pdata->resetcount++; + + /* Disable MAC rx */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_reset(pdata); + } + + /* Disable MAC */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + /* Cancel PHY loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + + smsc911x_reg_write(pdata, TX_CFG, 0); + smsc911x_reg_write(pdata, RX_CFG, 0); + + return result; + } + #endif /* USE_PHY_WORK_AROUND */ + + static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) + { + struct phy_device *phy_dev = pdata->phy_dev; + u32 afc = smsc911x_reg_read(pdata, AFC_CFG); + u32 flow; + unsigned long flags; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + if (cap & FLOW_CTRL_TX) + afc |= 0xF; + else + afc &= ~0xF; + + SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + SMSC_TRACE(pdata, hw, "half duplex"); + flow = 0; + afc |= 0xF; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, FLOW, flow); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_reg_write(pdata, AFC_CFG, afc); + } + + /* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. 
*/ + static void smsc911x_phy_adjust_link(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + int carrier; + + if (phy_dev->duplex != pdata->last_duplex) { + unsigned int mac_cr; + SMSC_TRACE(pdata, hw, "duplex state has changed"); + + spin_lock_irqsave(&pdata->mac_lock, flags); + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + if (phy_dev->duplex) { + SMSC_TRACE(pdata, hw, + "configuring for full duplex mode"); + mac_cr |= MAC_CR_FDPX_; + } else { + SMSC_TRACE(pdata, hw, + "configuring for half duplex mode"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_update_flowcontrol(pdata); + pdata->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pdata->last_carrier) { + SMSC_TRACE(pdata, hw, "carrier state has changed"); + if (carrier) { + SMSC_TRACE(pdata, hw, "configuring for carrier OK"); + if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Restore original GPIO configuration */ + pdata->gpio_setting = pdata->gpio_orig_setting; + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } else { + SMSC_TRACE(pdata, hw, "configuring for no carrier"); + /* Check global setting that LED1 + * usage is 10/100 indicator */ + pdata->gpio_setting = smsc911x_reg_read(pdata, + GPIO_CFG); + if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Force 10/100 LED off, after saving + * original GPIO configuration */ + pdata->gpio_orig_setting = pdata->gpio_setting; + + pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; + pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_ + | GPIO_CFG_GPIODIR0_ + | GPIO_CFG_GPIOD0_); + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } + pdata->last_carrier = carrier; + } + } + + static int smsc911x_mii_probe(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phydev = NULL; + int ret; + + /* find the first phy */ + phydev = phy_find_first(pdata->mii_bus); + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", + phydev->addr, phydev->phy_id); + + ret = phy_connect_direct(dev, phydev, + &smsc911x_phy_adjust_link, 0, + pdata->config.phy_interface); + + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pdata->phy_dev = phydev; + pdata->last_duplex = -1; + pdata->last_carrier = -1; + + #ifdef USE_PHY_WORK_AROUND + if (smsc911x_phy_loopbacktest(dev) < 0) { + SMSC_WARN(pdata, hw, "Failed Loop Back Test"); + return -ENODEV; + } + SMSC_TRACE(pdata, hw, "Passed Loop Back Test"); + #endif /* USE_PHY_WORK_AROUND */ + + SMSC_TRACE(pdata, hw, "phy initialised successfully"); + return 0; + } + + static int __devinit smsc911x_mii_init(struct platform_device *pdev, + struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + int err = -ENXIO, i; + + pdata->mii_bus = mdiobus_alloc(); + if (!pdata->mii_bus) { + err = -ENOMEM; + goto err_out_1; + } + + pdata->mii_bus->name = SMSC_MDIONAME; + 
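smsc911x_mac_complete(), smsc911x_mii_read() and smsc911x_mii_write() above all rely on the same bounded busy-wait idiom: issue a command, poll a BUSY bit a fixed number of times, and give up with -EIO if it never clears. A stripped-down sketch of that idiom; the status callback and bit name are placeholders, not driver symbols.

/* illustrative sketch of a bounded busy-wait, not driver code */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BUSY_BIT 0x1u

/* poll a status word until BUSY clears, or fail after max_tries */
static int poll_until_idle(uint32_t (*read_status)(void), int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (!(read_status() & DEMO_BUSY_BIT))
			return 0;
	}
	return -5;	/* -EIO in kernel terms */
}

static uint32_t fake_status(void)
{
	static int calls;

	return ++calls < 3 ? DEMO_BUSY_BIT : 0;	/* busy twice, then idle */
}

int main(void)
{
	printf("poll result: %d\n", poll_until_idle(fake_status, 100));
	return 0;
}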
snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); + pdata->mii_bus->priv = pdata; + pdata->mii_bus->read = smsc911x_mii_read; + pdata->mii_bus->write = smsc911x_mii_write; + pdata->mii_bus->irq = pdata->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + pdata->mii_bus->irq[i] = PHY_POLL; + + pdata->mii_bus->parent = &pdev->dev; + + switch (pdata->idrev & 0xFFFF0000) { + case 0x01170000: + case 0x01150000: + case 0x117A0000: + case 0x115A0000: + /* External PHY supported, try to autodetect */ + smsc911x_phy_initialise_external(pdata); + break; + default: + SMSC_TRACE(pdata, hw, "External PHY is not supported, " + "using internal PHY"); + pdata->using_extphy = 0; + break; + } + + if (!pdata->using_extphy) { + /* Mask all PHYs except ID 1 (internal) */ + pdata->mii_bus->phy_mask = ~(1 << 1); + } + + if (mdiobus_register(pdata->mii_bus)) { + SMSC_WARN(pdata, probe, "Error registering mii bus"); + goto err_out_free_bus_2; + } + + if (smsc911x_mii_probe(dev) < 0) { + SMSC_WARN(pdata, probe, "Error registering mii bus"); + goto err_out_unregister_bus_3; + } + + return 0; + + err_out_unregister_bus_3: + mdiobus_unregister(pdata->mii_bus); + err_out_free_bus_2: + mdiobus_free(pdata->mii_bus); + err_out_1: + return err; + } + + /* Gets the number of tx statuses in the fifo */ + static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata) + { + return (smsc911x_reg_read(pdata, TX_FIFO_INF) + & TX_FIFO_INF_TSUSED_) >> 16; + } + + /* Reads tx statuses and increments counters where necessary */ + static void smsc911x_tx_update_txcounters(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int tx_stat; + + while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) { + if (unlikely(tx_stat & 0x80000000)) { + /* In this driver the packet tag is used as the packet + * length. Since a packet length can never reach the + * size of 0x8000, this bit is reserved. It is worth + * noting that the "reserved bit" in the warning above + * does not reference a hardware defined reserved bit + * but rather a driver defined one. 
+ */ + SMSC_WARN(pdata, hw, "Packet tag reserved bit is high"); + } else { + if (unlikely(tx_stat & TX_STS_ES_)) { + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (tx_stat >> 16); + } + if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) { + dev->stats.collisions += 16; + dev->stats.tx_aborted_errors += 1; + } else { + dev->stats.collisions += + ((tx_stat >> 3) & 0xF); + } + if (unlikely(tx_stat & TX_STS_LOST_CARRIER_)) + dev->stats.tx_carrier_errors += 1; + if (unlikely(tx_stat & TX_STS_LATE_COL_)) { + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; + } + } + } + } + + /* Increments the Rx error counters */ + static void + smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) + { + int crc_err = 0; + + if (unlikely(rxstat & RX_STS_ES_)) { + dev->stats.rx_errors++; + if (unlikely(rxstat & RX_STS_CRC_ERR_)) { + dev->stats.rx_crc_errors++; + crc_err = 1; + } + } + if (likely(!crc_err)) { + if (unlikely((rxstat & RX_STS_FRAME_TYPE_) && + (rxstat & RX_STS_LENGTH_ERR_))) + dev->stats.rx_length_errors++; + if (rxstat & RX_STS_MCAST_) + dev->stats.multicast++; + } + } + + /* Quickly dumps bad packets */ + static void + smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) + { + unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; + + if (likely(pktwords >= 4)) { + unsigned int timeout = 500; + unsigned int val; + smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_); + do { + udelay(1); + val = smsc911x_reg_read(pdata, RX_DP_CTRL); + } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, hw, "Timed out waiting for " + "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); + } else { + unsigned int temp; + while (pktwords--) + temp = smsc911x_reg_read(pdata, RX_DATA_FIFO); + } + } + + /* NAPI poll function */ + static int smsc911x_poll(struct napi_struct *napi, int budget) + { + struct smsc911x_data *pdata = + container_of(napi, struct smsc911x_data, napi); + struct net_device *dev = pdata->dev; + int npackets = 0; + + while (npackets < budget) { + unsigned int pktlength; + unsigned int pktwords; + struct sk_buff *skb; + unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata); + + if (!rxstat) { + unsigned int temp; + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts */ + smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); + napi_complete(napi); + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_RSFL_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + break; + } + + /* Count packet for NAPI scheduling, even if it has an error. 
+ * Error packets still require cycles to discard */ + npackets++; + + pktlength = ((rxstat & 0x3FFF0000) >> 16); + pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2; + smsc911x_rx_counterrors(dev, rxstat); + + if (unlikely(rxstat & RX_STS_ES_)) { + SMSC_WARN(pdata, rx_err, + "Discarding packet with error bit set"); + /* Packet has an error, discard it and continue with + * the next */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + continue; + } + + skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); + if (unlikely(!skb)) { + SMSC_WARN(pdata, rx_err, + "Unable to allocate skb for rx packet"); + /* Drop the packet and stop this polling iteration */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + break; + } + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + + /* Align IP on 16B boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pktlength - 4); + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->head, pktwords); + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + netif_receive_skb(skb); + + /* Update counters */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += (pktlength - 4); + } + + /* Return total received packets */ + return npackets; + } + + /* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ + static unsigned int smsc911x_hash(char addr[ETH_ALEN]) + { + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; + } + + static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata) + { + /* Performs the multicast & mac_cr update. This is called when + * safe on the current hardware, and with the mac_lock held */ + unsigned int mac_cr; + + SMSC_ASSERT_MAC_LOCK(pdata); + + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= pdata->set_bits_mask; + mac_cr &= ~(pdata->clear_bits_mask); + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + smsc911x_mac_write(pdata, HASHH, pdata->hashhi); + smsc911x_mac_write(pdata, HASHL, pdata->hashlo); + SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", + mac_cr, pdata->hashhi, pdata->hashlo); + } + + static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) + { + unsigned int mac_cr; + + /* This function is only called for older LAN911x devices + * (revA or revB), where MAC_CR, HASHH and HASHL should not + * be modified during Rx - newer devices immediately update the + * registers. 
+ * + * This is called from interrupt context */ + + spin_lock(&pdata->mac_lock); + + /* Check Rx has stopped */ + if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) + SMSC_WARN(pdata, drv, "Rx not stopped"); + + /* Perform the update - safe to do now Rx has stopped */ + smsc911x_rx_multicast_update(pdata); + + /* Re-enable Rx */ + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= MAC_CR_RXEN_; + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + + pdata->multicast_update_pending = 0; + + spin_unlock(&pdata->mac_lock); + } + + static int smsc911x_soft_reset(struct smsc911x_data *pdata) + { + unsigned int timeout; + unsigned int temp; + + /* Reset the LAN911x */ + smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); + timeout = 10; + do { + udelay(10); + temp = smsc911x_reg_read(pdata, HW_CFG); + } while ((--timeout) && (temp & HW_CFG_SRST_)); + + if (unlikely(temp & HW_CFG_SRST_)) { + SMSC_WARN(pdata, drv, "Failed to complete reset"); + return -EIO; + } + return 0; + } + + /* Sets the device MAC address to dev_addr, called with mac_lock held */ + static void + smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) + { + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + SMSC_ASSERT_MAC_LOCK(pdata); + + smsc911x_mac_write(pdata, ADDRH, mac_high16); + smsc911x_mac_write(pdata, ADDRL, mac_low32); + } + + static int smsc911x_open(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int timeout; + unsigned int temp; + unsigned int intcfg; + + /* if the phy is not yet registered, retry later*/ + if (!pdata->phy_dev) { + SMSC_WARN(pdata, hw, "phy_dev is NULL"); + return -EAGAIN; + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address"); + return -EADDRNOTAVAIL; + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) { + SMSC_WARN(pdata, hw, "soft reset failed"); + return -EIO; + } + + smsc911x_reg_write(pdata, HW_CFG, 0x00050000); + smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout = 50; + while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && + --timeout) { + udelay(10); + } + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, ifup, + "Timed out waiting for EEPROM busy bit to clear"); + + smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); + + /* The soft reset above cleared the device's MAC address, + * restore it from local copy (set in probe) */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + /* Initialise irqs, but leave all sources disabled */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + /* Set interrupt deassertion to 100uS */ + intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); + + if (pdata->config.irq_polarity) { + SMSC_TRACE(pdata, ifup, "irq polarity: active high"); + intcfg |= INT_CFG_IRQ_POL_; + } else { + SMSC_TRACE(pdata, ifup, "irq polarity: active low"); + } + + if (pdata->config.irq_type) { + SMSC_TRACE(pdata, ifup, "irq type: push-pull"); + intcfg |= INT_CFG_IRQ_TYPE_; + } else { + SMSC_TRACE(pdata, ifup, "irq type: open drain"); + } + + smsc911x_reg_write(pdata, 
INT_CFG, intcfg); + + SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq); + pdata->software_irq_signal = 0; + smp_wmb(); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_SW_INT_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + + timeout = 1000; + while (timeout--) { + if (pdata->software_irq_signal) + break; + msleep(1); + } + + if (!pdata->software_irq_signal) { + netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", + dev->irq); + return -ENODEV; + } + SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", + dev->irq); + + netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", + (unsigned long)pdata->ioaddr, dev->irq); + + /* Reset the last known duplex and carrier */ + pdata->last_duplex = -1; + pdata->last_carrier = -1; + + /* Bring the PHY up */ + phy_start(pdata->phy_dev); + + temp = smsc911x_reg_read(pdata, HW_CFG); + /* Preserve TX FIFO size and external PHY configuration */ + temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF); + temp |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, temp); + + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + temp &= ~(FIFO_INT_RX_STS_LEVEL_); + smsc911x_reg_write(pdata, FIFO_INT, temp); + + /* set RX Data offset to 2 bytes for alignment */ + smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); + + /* enable NAPI polling before enabling RX interrupts */ + napi_enable(&pdata->napi); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + + spin_lock_irq(&pdata->mac_lock); + temp = smsc911x_mac_read(pdata, MAC_CR); + temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + smsc911x_mac_write(pdata, MAC_CR, temp); + spin_unlock_irq(&pdata->mac_lock); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + + netif_start_queue(dev); + return 0; + } + + /* Entry point for stopping the interface */ + static int smsc911x_stop(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int temp; + + /* Disable all device interrupts */ + temp = smsc911x_reg_read(pdata, INT_CFG); + temp &= ~INT_CFG_IRQ_EN_; + smsc911x_reg_write(pdata, INT_CFG, temp); + + /* Stop Tx and Rx polling */ + netif_stop_queue(dev); + napi_disable(&pdata->napi); + + /* At this point all Rx and Tx activity is stopped */ + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + smsc911x_tx_update_txcounters(dev); + + /* Bring the PHY down */ + if (pdata->phy_dev) + phy_stop(pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Interface stopped"); + return 0; + } + + /* Entry point for transmitting a packet */ + static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int freespace; + unsigned int tx_cmd_a; + unsigned int tx_cmd_b; + unsigned int temp; + u32 wrsz; + ulong bufp; + + freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; + + if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) + SMSC_WARN(pdata, tx_err, + "Tx data fifo low, space available: %d", freespace); + + /* Word alignment adjustment */ + tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; + tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + tx_cmd_a |= (unsigned int)skb->len; + + tx_cmd_b = ((unsigned int)skb->len) << 16; + tx_cmd_b |= (unsigned int)skb->len; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b); + + bufp = (ulong)skb->data & (~0x3); + wrsz = 
(u32)skb->len + 3; + wrsz += (u32)((ulong)skb->data & 0x3); + wrsz >>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + freespace -= (skb->len + 32); + skb_tx_timestamp(skb); + dev_kfree_skb(skb); + + if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) + smsc911x_tx_update_txcounters(dev); + + if (freespace < TX_FIFO_LOW_THRESHOLD) { + netif_stop_queue(dev); + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp &= 0x00FFFFFF; + temp |= 0x32000000; + smsc911x_reg_write(pdata, FIFO_INT, temp); + } + + return NETDEV_TX_OK; + } + + /* Entry point for getting status counters */ + static struct net_device_stats *smsc911x_get_stats(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + smsc911x_tx_update_txcounters(dev); + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + return &dev->stats; + } + + /* Entry point for setting addressing modes */ + static void smsc911x_set_multicast_list(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned long flags; + + if (dev->flags & IFF_PROMISC) { + /* Enabling promiscuous mode */ + pdata->set_bits_mask = MAC_CR_PRMS_; + pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (dev->flags & IFF_ALLMULTI) { + /* Enabling all multicast mode */ + pdata->set_bits_mask = MAC_CR_MCPAS_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (!netdev_mc_empty(dev)) { + /* Enabling specific multicast addresses */ + unsigned int hash_high = 0; + unsigned int hash_low = 0; + struct netdev_hw_addr *ha; + + pdata->set_bits_mask = MAC_CR_HPFILT_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); + unsigned int mask = 0x01 << (bitnum & 0x1F); + + if (bitnum & 0x20) + hash_high |= mask; + else + hash_low |= mask; + } + + pdata->hashhi = hash_high; + pdata->hashlo = hash_low; + } else { + /* Enabling local MAC address only */ + pdata->set_bits_mask = 0; + pdata->clear_bits_mask = + (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + + if (pdata->generation <= 1) { + /* Older hardware revision - cannot change these flags while + * receiving data */ + if (!pdata->multicast_update_pending) { + unsigned int temp; + SMSC_TRACE(pdata, hw, "scheduling mcast update"); + pdata->multicast_update_pending = 1; + + /* Request the hardware to stop, then perform the + * update when we get an RX_STOP interrupt */ + temp = smsc911x_mac_read(pdata, MAC_CR); + temp &= ~(MAC_CR_RXEN_); + smsc911x_mac_write(pdata, MAC_CR, temp); + } else { + /* There is another update pending, this should now + * use the newer values */ + } + } else { + /* Newer hardware revision - can write immediately */ + smsc911x_rx_multicast_update(pdata); + } + + spin_unlock_irqrestore(&pdata->mac_lock, flags); + } + + static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) + { + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + 
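smsc911x_hash() and smsc911x_set_multicast_list() above implement the usual 64-bin multicast hash filter: the top six bits of the Ethernet CRC of the address select a bin 0-63, bit 5 of the bin chooses between the HASHH and HASHL registers, and the low five bits select the bit within that register. A self-contained sketch of the bin-to-register mapping; the CRC step is omitted because ether_crc() is kernel-only, so the bin number is passed in directly.

/* illustrative sketch of the 64-bin multicast hash mapping, not driver code */
#include <stdint.h>
#include <stdio.h>

/* map a 6-bit hash bin (e.g. ether_crc(addr) >> 26) onto two 32-bit registers */
static void set_hash_bit(unsigned int bitnum, uint32_t *hash_hi, uint32_t *hash_lo)
{
	uint32_t mask = 1u << (bitnum & 0x1F);	/* bit within the register */

	if (bitnum & 0x20)			/* bins 32..63 -> HASHH */
		*hash_hi |= mask;
	else					/* bins 0..31  -> HASHL */
		*hash_lo |= mask;
}

int main(void)
{
	uint32_t hi = 0, lo = 0;

	set_hash_bit(31, &hi, &lo);	/* 01:00:5e:00:00:01 hashes to bin 31 */
	printf("HASHH=0x%08x HASHL=0x%08x\n", hi, lo);
	return 0;
}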
pdata->software_irq_signal = 1; + smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + static void smsc911x_poll_controller(struct net_device *dev) + { + disable_irq(dev->irq); + smsc911x_irqhandler(0, dev); + enable_irq(dev->irq); + } + #endif /* CONFIG_NET_POLL_CONTROLLER */ + + static int smsc911x_set_mac_address(struct net_device *dev, void *p) + { + struct smsc911x_data *pdata = netdev_priv(dev); + struct sockaddr *addr = p; + + /* On older hardware revisions we cannot change the mac address + * registers while receiving data. Newer devices can safely change + * this at any time. 
*/ + if (pdata->generation <= 1 && netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; + } + + /* Standard ioctls for mii-tool */ + static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct smsc911x_data *pdata = netdev_priv(dev); + + if (!netif_running(dev) || !pdata->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); + } + + static int + smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct smsc911x_data *pdata = netdev_priv(dev); + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pdata->phy_dev, cmd); + } + + static int + smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_ethtool_sset(pdata->phy_dev, cmd); + } + + static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) + { + strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); + strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); + } + + static int smsc911x_ethtool_nwayreset(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_start_aneg(pdata->phy_dev); + } + + static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + return pdata->msg_enable; + } + + static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) + { + struct smsc911x_data *pdata = netdev_priv(dev); + pdata->msg_enable = level; + } + + static int smsc911x_ethtool_getregslen(struct net_device *dev) + { + return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) * + sizeof(u32); + } + + static void + smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) + { + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + unsigned int i; + unsigned int j = 0; + u32 *data = buf; + + regs->version = pdata->idrev; + for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32))) + data[j++] = smsc911x_reg_read(pdata, i); + + for (i = MAC_CR; i <= WUCSR; i++) { + spin_lock_irqsave(&pdata->mac_lock, flags); + data[j++] = smsc911x_mac_read(pdata, i); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + } + + for (i = 0; i <= 31; i++) + data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); + } + + static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) + { + unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc911x_reg_write(pdata, GPIO_CFG, temp); + msleep(1); + } + + static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op) + { + int timeout = 100; + u32 e2cmd; + + SMSC_TRACE(pdata, drv, "op 0x%08x", op); + if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + SMSC_WARN(pdata, drv, "Busy at start"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc911x_reg_write(pdata, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc911x_reg_read(pdata, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + 
SMSC_TRACE(pdata, drv, "TIMED OUT"); + return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation"); + return -EINVAL; + } + + return 0; + } + + static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata, + u8 address, u8 *data) + { + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x", address); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) + data[address] = smsc911x_reg_read(pdata, E2P_DATA); + + return ret; + } + + static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, + u8 address, u8 data) + { + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + u32 temp; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | address; + smsc911x_reg_write(pdata, E2P_DATA, (u32)data); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + ret = smsc911x_eeprom_send_cmd(pdata, op); + } + + return ret; + } + + static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev) + { + return SMSC911X_EEPROM_SIZE; + } + + static int smsc911x_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) + { + struct smsc911x_data *pdata = netdev_priv(dev); + u8 eeprom_data[SMSC911X_EEPROM_SIZE]; + int len; + int i; + + smsc911x_eeprom_enable_access(pdata); + + len = min(eeprom->len, SMSC911X_EEPROM_SIZE); + for (i = 0; i < len; i++) { + int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->len = len; + return 0; + } + + static int smsc911x_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) + { + int ret; + struct smsc911x_data *pdata = netdev_priv(dev); + + smsc911x_eeprom_enable_access(pdata); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; + } + + static const struct ethtool_ops smsc911x_ethtool_ops = { + .get_settings = smsc911x_ethtool_getsettings, + .set_settings = smsc911x_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = smsc911x_ethtool_getdrvinfo, + .nway_reset = smsc911x_ethtool_nwayreset, + .get_msglevel = smsc911x_ethtool_getmsglevel, + .set_msglevel = smsc911x_ethtool_setmsglevel, + .get_regs_len = smsc911x_ethtool_getregslen, + .get_regs = smsc911x_ethtool_getregs, + .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, + .get_eeprom = smsc911x_ethtool_get_eeprom, + .set_eeprom = smsc911x_ethtool_set_eeprom, + }; + + static const struct net_device_ops smsc911x_netdev_ops = { + .ndo_open = smsc911x_open, + .ndo_stop = smsc911x_stop, + .ndo_start_xmit = smsc911x_hard_start_xmit, + .ndo_get_stats = smsc911x_get_stats, + .ndo_set_rx_mode = smsc911x_set_multicast_list, + .ndo_do_ioctl = smsc911x_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = smsc911x_set_mac_address, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc911x_poll_controller, + #endif + }; + + /* copies the current mac address from hardware to dev->dev_addr */ + static void __devinit smsc911x_read_mac_address(struct 
net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); + u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); + } + + /* Initializing private device structures, only called from probe */ + static int __devinit smsc911x_init(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int byte_test; + + SMSC_TRACE(pdata, probe, "Driver Parameters:"); + SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", + (unsigned long)pdata->ioaddr); + SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq); + SMSC_TRACE(pdata, probe, "PHY will be autodetected."); + + spin_lock_init(&pdata->dev_lock); + spin_lock_init(&pdata->mac_lock); + + if (pdata->ioaddr == 0) { + SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); + return -ENODEV; + } + + /* Check byte ordering */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); + if (byte_test == 0x43218765) { + SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, " + "applying WORD_SWAP"); + smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); + + /* 1 dummy read of BYTE_TEST is needed after a write to + * WORD_SWAP before its contents are valid */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + } + + if (byte_test != 0x87654321) { + SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test); + if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { + SMSC_WARN(pdata, probe, + "top 16 bits equal to bottom 16 bits"); + SMSC_TRACE(pdata, probe, + "This may mean the chip is set " + "for 32 bit while the bus is reading 16 bit"); + } + return -ENODEV; + } + + /* Default generation to zero (all workarounds apply) */ + pdata->generation = 0; + + pdata->idrev = smsc911x_reg_read(pdata, ID_REV); + switch (pdata->idrev & 0xFFFF0000) { + case 0x01180000: + case 0x01170000: + case 0x01160000: + case 0x01150000: ++ case 0x218A0000: + /* LAN911[5678] family */ + pdata->generation = pdata->idrev & 0x0000FFFF; + break; + + case 0x118A0000: + case 0x117A0000: + case 0x116A0000: + case 0x115A0000: + /* LAN921[5678] family */ + pdata->generation = 3; + break; + + case 0x92100000: + case 0x92110000: + case 0x92200000: + case 0x92210000: + /* LAN9210/LAN9211/LAN9220/LAN9221 */ + pdata->generation = 4; + break; + + default: + SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X", + pdata->idrev); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, + "LAN911x identified, idrev: 0x%08X, generation: %d", + pdata->idrev, pdata->generation); + + if (pdata->generation == 0) + SMSC_WARN(pdata, probe, + "This driver is not intended for this chip revision"); + + /* workaround for platforms without an eeprom, where the mac address + * is stored elsewhere and set by the bootloader. 
This saves the + * mac address before resetting the device */ + if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) { + spin_lock_irq(&pdata->mac_lock); + smsc911x_read_mac_address(dev); + spin_unlock_irq(&pdata->mac_lock); + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) + return -ENODEV; + + /* Disable all interrupt sources until we bring the device up */ + smsc911x_reg_write(pdata, INT_EN, 0); + + ether_setup(dev); + dev->flags |= IFF_MULTICAST; + netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); + dev->netdev_ops = &smsc911x_netdev_ops; + dev->ethtool_ops = &smsc911x_ethtool_ops; + + return 0; + } + + static int __devexit smsc911x_drv_remove(struct platform_device *pdev) + { + struct net_device *dev; + struct smsc911x_data *pdata; + struct resource *res; + + dev = platform_get_drvdata(pdev); + BUG_ON(!dev); + pdata = netdev_priv(dev); + BUG_ON(!pdata); + BUG_ON(!pdata->ioaddr); + BUG_ON(!pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Stopping driver"); + + phy_disconnect(pdata->phy_dev); + pdata->phy_dev = NULL; + mdiobus_unregister(pdata->mii_bus); + mdiobus_free(pdata->mii_bus); + + platform_set_drvdata(pdev, NULL); + unregister_netdev(dev); + free_irq(dev->irq, dev); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + release_mem_region(res->start, resource_size(res)); + + iounmap(pdata->ioaddr); + + free_netdev(dev); + + return 0; + } + + /* standard register acces */ + static const struct smsc911x_ops standard_smsc911x_ops = { + .reg_read = __smsc911x_reg_read, + .reg_write = __smsc911x_reg_write, + .rx_readfifo = smsc911x_rx_readfifo, + .tx_writefifo = smsc911x_tx_writefifo, + }; + + /* shifted register access */ + static const struct smsc911x_ops shifted_smsc911x_ops = { + .reg_read = __smsc911x_reg_read_shift, + .reg_write = __smsc911x_reg_write_shift, + .rx_readfifo = smsc911x_rx_readfifo_shift, + .tx_writefifo = smsc911x_tx_writefifo_shift, + }; + + #ifdef CONFIG_OF + static int __devinit smsc911x_probe_config_dt( + struct smsc911x_platform_config *config, + struct device_node *np) + { + const char *mac; + u32 width = 0; + + if (!np) + return -ENODEV; + + config->phy_interface = of_get_phy_mode(np); + + mac = of_get_mac_address(np); + if (mac) + memcpy(config->mac, mac, ETH_ALEN); + + of_property_read_u32(np, "reg-shift", &config->shift); + + of_property_read_u32(np, "reg-io-width", &width); + if (width == 4) + config->flags |= SMSC911X_USE_32BIT; + else + config->flags |= SMSC911X_USE_16BIT; + + if (of_get_property(np, "smsc,irq-active-high", NULL)) + config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH; + + if (of_get_property(np, "smsc,irq-push-pull", NULL)) + config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL; + + if (of_get_property(np, "smsc,force-internal-phy", NULL)) + config->flags |= SMSC911X_FORCE_INTERNAL_PHY; + + if (of_get_property(np, "smsc,force-external-phy", NULL)) + config->flags |= SMSC911X_FORCE_EXTERNAL_PHY; + + if (of_get_property(np, "smsc,save-mac-address", NULL)) + config->flags |= SMSC911X_SAVE_MAC_ADDRESS; + + return 0; + } + #else + static inline int smsc911x_probe_config_dt( + struct smsc911x_platform_config *config, + struct device_node *np) + { + return -ENODEV; + } + #endif /* CONFIG_OF */ + + static int __devinit smsc911x_drv_probe(struct platform_device *pdev) + { + struct device_node *np = pdev->dev.of_node; + struct net_device *dev; + struct smsc911x_data *pdata; + struct smsc911x_platform_config *config 
= pdev->dev.platform_data; + struct resource *res, *irq_res; + unsigned int intcfg = 0; + int res_size, irq_flags; + int retval; + + pr_info("Driver version %s\n", SMSC_DRV_VERSION); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_warn("Could not allocate resource\n"); + retval = -ENODEV; + goto out_0; + } + res_size = resource_size(res); + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + pr_warn("Could not allocate irq resource\n"); + retval = -ENODEV; + goto out_0; + } + + if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { + retval = -EBUSY; + goto out_0; + } + + dev = alloc_etherdev(sizeof(struct smsc911x_data)); + if (!dev) { + pr_warn("Could not allocate device\n"); + retval = -ENOMEM; + goto out_release_io_1; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + pdata = netdev_priv(dev); + + dev->irq = irq_res->start; + irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; + pdata->ioaddr = ioremap_nocache(res->start, res_size); + + pdata->dev = dev; + pdata->msg_enable = ((1 << debug) - 1); + + if (pdata->ioaddr == NULL) { + SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); + retval = -ENOMEM; + goto out_free_netdev_2; + } + + retval = smsc911x_probe_config_dt(&pdata->config, np); + if (retval && config) { + /* copy config parameters across to pdata */ + memcpy(&pdata->config, config, sizeof(pdata->config)); + retval = 0; + } + + if (retval) { + SMSC_WARN(pdata, probe, "Error smsc911x config not found"); + goto out_unmap_io_3; + } + + /* assume standard, non-shifted, access to HW registers */ + pdata->ops = &standard_smsc911x_ops; + /* apply the right access if shifting is needed */ + if (pdata->config.shift) + pdata->ops = &shifted_smsc911x_ops; + + retval = smsc911x_init(dev); + if (retval < 0) + goto out_unmap_io_3; + + /* configure irq polarity and type before connecting isr */ + if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) + intcfg |= INT_CFG_IRQ_POL_; + + if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) + intcfg |= INT_CFG_IRQ_TYPE_; + + smsc911x_reg_write(pdata, INT_CFG, intcfg); + + /* Ensure interrupts are globally disabled before connecting ISR */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto out_unmap_io_3; + } + + platform_set_drvdata(pdev, dev); + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); + goto out_unset_drvdata_4; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: "%s"", dev->name); + } + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); + goto out_unregister_netdev_5; + } + + spin_lock_irq(&pdata->mac_lock); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is specified by configuration"); + } else if (is_valid_ether_addr(pdata->config.mac)) { + memcpy(dev->dev_addr, pdata->config.mac, 6); + SMSC_TRACE(pdata, probe, + "MAC Address specified by platform data"); + } else { + /* Try reading mac address from device. 
if EEPROM is present + * it will already have been set */ + smsc_get_mac(dev); + + if (is_valid_ether_addr(dev->dev_addr)) { + /* eeprom values are valid so use them */ + SMSC_TRACE(pdata, probe, + "Mac Address is read from LAN911x EEPROM"); + } else { + /* eeprom values are invalid, generate random MAC */ + random_ether_addr(dev->dev_addr); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is set to random_ether_addr"); + } + } + + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; + + out_unregister_netdev_5: + unregister_netdev(dev); + out_unset_drvdata_4: + platform_set_drvdata(pdev, NULL); + free_irq(dev->irq, dev); + out_unmap_io_3: + iounmap(pdata->ioaddr); + out_free_netdev_2: + free_netdev(dev); + out_release_io_1: + release_mem_region(res->start, resource_size(res)); + out_0: + return retval; + } + + #ifdef CONFIG_PM + /* This implementation assumes the devices remains powered on its VDDVARIO + * pins during suspend. */ + + /* TODO: implement freeze/thaw callbacks for hibernation.*/ + + static int smsc911x_suspend(struct device *dev) + { + struct net_device *ndev = dev_get_drvdata(dev); + struct smsc911x_data *pdata = netdev_priv(ndev); + + /* enable wake on LAN, energy detection and the external PME + * signal. */ + smsc911x_reg_write(pdata, PMT_CTRL, + PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ | + PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_); + + return 0; + } + + static int smsc911x_resume(struct device *dev) + { + struct net_device *ndev = dev_get_drvdata(dev); + struct smsc911x_data *pdata = netdev_priv(ndev); + unsigned int to = 100; + + /* Note 3.11 from the datasheet: + * "When the LAN9220 is in a power saving state, a write of any + * data to the BYTE_TEST register will wake-up the device." + */ + smsc911x_reg_write(pdata, BYTE_TEST, 0); + + /* poll the READY bit in PMT_CTRL. Any other access to the device is + * forbidden while this bit isn't set. Try for 100ms and return -EIO + * if it failed. */ + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + udelay(1000); + + return (to == 0) ? -EIO : 0; + } + + static const struct dev_pm_ops smsc911x_pm_ops = { + .suspend = smsc911x_suspend, + .resume = smsc911x_resume, + }; + + #define SMSC911X_PM_OPS (&smsc911x_pm_ops) + + #else + #define SMSC911X_PM_OPS NULL + #endif + + static const struct of_device_id smsc911x_dt_ids[] = { + { .compatible = "smsc,lan9115", }, + { /* sentinel */ } + }; + MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); + + static struct platform_driver smsc911x_driver = { + .probe = smsc911x_drv_probe, + .remove = __devexit_p(smsc911x_drv_remove), + .driver = { + .name = SMSC_CHIPNAME, + .owner = THIS_MODULE, + .pm = SMSC911X_PM_OPS, + .of_match_table = smsc911x_dt_ids, + }, + }; + + /* Entry point for loading the module */ + static int __init smsc911x_init_module(void) + { + SMSC_INITIALIZE(); + return platform_driver_register(&smsc911x_driver); + } + + /* entry point for unloading the module */ + static void __exit smsc911x_cleanup_module(void) + { + platform_driver_unregister(&smsc911x_driver); + } + + module_init(smsc911x_init_module); + module_exit(smsc911x_cleanup_module); diff --combined drivers/net/ethernet/toshiba/ps3_gelic_net.c index 0000000,ddb33cf..7bf1e20 mode 000000,100644..100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@@ -1,0 -1,1896 +1,1899 @@@ + /* + * PS3 gelic network driver. 
+ * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2006, 2007 Sony Corporation + * + * This file is based on: spider_net.c + * + * (C) Copyright IBM Corp. 2005 + * + * Authors : Utz Bacher utz.bacher@de.ibm.com + * Jens Osterkamp Jens.Osterkamp@de.ibm.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + #undef DEBUG + + #include <linux/interrupt.h> + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/slab.h> + + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/if_vlan.h> + + #include <linux/in.h> + #include <linux/ip.h> + #include <linux/tcp.h> + + #include <linux/dma-mapping.h> + #include <net/checksum.h> + #include <asm/firmware.h> + #include <asm/ps3.h> + #include <asm/lv1call.h> + + #include "ps3_gelic_net.h" + #include "ps3_gelic_wireless.h" + + #define DRV_NAME "Gelic Network Driver" + #define DRV_VERSION "2.0" + + MODULE_AUTHOR("SCE Inc."); + MODULE_DESCRIPTION("Gelic Network driver"); + MODULE_LICENSE("GPL"); + + + static inline void gelic_card_enable_rxdmac(struct gelic_card *card); + static inline void gelic_card_disable_rxdmac(struct gelic_card *card); + static inline void gelic_card_disable_txdmac(struct gelic_card *card); + static inline void gelic_card_reset_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr); + + /* set irq_mask */ + int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) + { + int status; + + status = lv1_net_set_interrupt_mask(bus_id(card), dev_id(card), + mask, 0); + if (status) + dev_info(ctodev(card), + "%s failed %d\n", __func__, status); + return status; + } + + static inline void gelic_card_rx_irq_on(struct gelic_card *card) + { + card->irq_mask |= GELIC_CARD_RXINT; + gelic_card_set_irq_mask(card, card->irq_mask); + } + static inline void gelic_card_rx_irq_off(struct gelic_card *card) + { + card->irq_mask &= ~GELIC_CARD_RXINT; + gelic_card_set_irq_mask(card, card->irq_mask); + } + + static void gelic_card_get_ether_port_status(struct gelic_card *card, + int inform) + { + u64 v2; + struct net_device *ether_netdev; + + lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_ETH_PORT_STATUS, + GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0, + &card->ether_port_status, &v2); + + if (inform) { + ether_netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP) + netif_carrier_on(ether_netdev); + else + netif_carrier_off(ether_netdev); + } + } + + static int gelic_card_set_link_mode(struct gelic_card *card, int mode) + { + int status; + u64 v1, v2; + + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_NEGOTIATION_MODE, + GELIC_LV1_PHY_ETHERNET_0, mode, 0, &v1, &v2); + if (status) { + pr_info("%s: failed setting negotiation mode %d\n", __func__, + status); + return -EBUSY; + } + + card->link_mode = mode; + return 0; + } + + void 
gelic_card_up(struct gelic_card *card) + { + pr_debug("%s: called\n", __func__); + mutex_lock(&card->updown_lock); + if (atomic_inc_return(&card->users) == 1) { + pr_debug("%s: real do\n", __func__); + /* enable irq */ + gelic_card_set_irq_mask(card, card->irq_mask); + /* start rx */ + gelic_card_enable_rxdmac(card); + + napi_enable(&card->napi); + } + mutex_unlock(&card->updown_lock); + pr_debug("%s: done\n", __func__); + } + + void gelic_card_down(struct gelic_card *card) + { + u64 mask; + pr_debug("%s: called\n", __func__); + mutex_lock(&card->updown_lock); + if (atomic_dec_if_positive(&card->users) == 0) { + pr_debug("%s: real do\n", __func__); + napi_disable(&card->napi); + /* + * Disable irq. Wireless interrupts will + * be disabled later if any + */ + mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED); + gelic_card_set_irq_mask(card, mask); + /* stop rx */ + gelic_card_disable_rxdmac(card); + gelic_card_reset_chain(card, &card->rx_chain, + card->descr + GELIC_NET_TX_DESCRIPTORS); + /* stop tx */ + gelic_card_disable_txdmac(card); + } + mutex_unlock(&card->updown_lock); + pr_debug("%s: done\n", __func__); + } + + /** + * gelic_descr_get_status -- returns the status of a descriptor + * @descr: descriptor to look at + * + * returns the status as in the dmac_cmd_status field of the descriptor + */ + static enum gelic_descr_dma_status + gelic_descr_get_status(struct gelic_descr *descr) + { + return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK; + } + + /** + * gelic_descr_set_status -- sets the status of a descriptor + * @descr: descriptor to change + * @status: status to set in the descriptor + * + * changes the status to the specified value. Doesn't change other bits + * in the status + */ + static void gelic_descr_set_status(struct gelic_descr *descr, + enum gelic_descr_dma_status status) + { + descr->dmac_cmd_status = cpu_to_be32(status | + (be32_to_cpu(descr->dmac_cmd_status) & + ~GELIC_DESCR_DMA_STAT_MASK)); + /* + * dma_cmd_status field is used to indicate whether the descriptor + * is valid or not. + * Usually caller of this function wants to inform that to the + * hardware, so we assure here the hardware sees the change. + */ + wmb(); + } + + /** + * gelic_card_free_chain - free descriptor chain + * @card: card structure + * @descr_in: address of desc + */ + static void gelic_card_free_chain(struct gelic_card *card, + struct gelic_descr *descr_in) + { + struct gelic_descr *descr; + + for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) { + dma_unmap_single(ctodev(card), descr->bus_addr, + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); + descr->bus_addr = 0; + } + } + + /** + * gelic_card_init_chain - links descriptor chain + * @card: card structure + * @chain: address of chain + * @start_descr: address of descriptor array + * @no: number of descriptors + * + * we manage a circular list that mirrors the hardware structure, + * except that the hardware uses bus addresses. 
+ * + * returns 0 on success, <0 on failure + */ + static int __devinit gelic_card_init_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr, + int no) + { + int i; + struct gelic_descr *descr; + + descr = start_descr; + memset(descr, 0, sizeof(*descr) * no); + + /* set up the hardware pointers in each descriptor */ + for (i = 0; i < no; i++, descr++) { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + descr->bus_addr = + dma_map_single(ctodev(card), descr, + GELIC_DESCR_SIZE, + DMA_BIDIRECTIONAL); + + if (!descr->bus_addr) + goto iommu_error; + + descr->next = descr + 1; + descr->prev = descr - 1; + } + /* make them as ring */ + (descr - 1)->next = start_descr; + start_descr->prev = (descr - 1); + + /* chain bus addr of hw descriptor */ + descr = start_descr; + for (i = 0; i < no; i++, descr++) { + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); + } + + chain->head = start_descr; + chain->tail = start_descr; + + /* do not chain last hw descriptor */ + (descr - 1)->next_descr_addr = 0; + + return 0; + + iommu_error: + for (i--, descr--; 0 <= i; i--, descr--) + if (descr->bus_addr) + dma_unmap_single(ctodev(card), descr->bus_addr, + GELIC_DESCR_SIZE, + DMA_BIDIRECTIONAL); + return -ENOMEM; + } + + /** + * gelic_card_reset_chain - reset status of a descriptor chain + * @card: card structure + * @chain: address of chain + * @start_descr: address of descriptor array + * + * Reset the status of dma descriptors to ready state + * and re-initialize the hardware chain for later use + */ + static void gelic_card_reset_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr) + { + struct gelic_descr *descr; + + for (descr = start_descr; start_descr != descr->next; descr++) { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); + } + + chain->head = start_descr; + chain->tail = (descr - 1); + + (descr - 1)->next_descr_addr = 0; + } + /** + * gelic_descr_prepare_rx - reinitializes a rx descriptor + * @card: card structure + * @descr: descriptor to re-init + * + * return 0 on success, <0 on failure + * + * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 
+ * Activate the descriptor state-wise + */ + static int gelic_descr_prepare_rx(struct gelic_card *card, + struct gelic_descr *descr) + { + int offset; + unsigned int bufsize; + + if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) + dev_info(ctodev(card), "%s: ERROR status\n", __func__); + /* we need to round up the buffer size to a multiple of 128 */ + bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); + + /* and we need to have it 128 byte aligned, therefore we allocate a + * bit more */ + descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); + if (!descr->skb) { + descr->buf_addr = 0; /* tell DMAC don't touch memory */ + dev_info(ctodev(card), + "%s:allocate skb failed !!\n", __func__); + return -ENOMEM; + } + descr->buf_size = cpu_to_be32(bufsize); + descr->dmac_cmd_status = 0; + descr->result_size = 0; + descr->valid_size = 0; + descr->data_error = 0; + + offset = ((unsigned long)descr->skb->data) & + (GELIC_NET_RXBUF_ALIGN - 1); + if (offset) + skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); + /* io-mmu-map the skb */ + descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), + descr->skb->data, + GELIC_NET_MAX_MTU, + DMA_FROM_DEVICE)); + if (!descr->buf_addr) { + dev_kfree_skb_any(descr->skb); + descr->skb = NULL; + dev_info(ctodev(card), + "%s:Could not iommu-map rx buffer\n", __func__); + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + return -ENOMEM; + } else { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); + return 0; + } + } + + /** + * gelic_card_release_rx_chain - free all skb of rx descr + * @card: card structure + * + */ + static void gelic_card_release_rx_chain(struct gelic_card *card) + { + struct gelic_descr *descr = card->rx_chain.head; + + do { + if (descr->skb) { + dma_unmap_single(ctodev(card), + be32_to_cpu(descr->buf_addr), + descr->skb->len, + DMA_FROM_DEVICE); + descr->buf_addr = 0; + dev_kfree_skb_any(descr->skb); + descr->skb = NULL; + gelic_descr_set_status(descr, + GELIC_DESCR_DMA_NOT_IN_USE); + } + descr = descr->next; + } while (descr != card->rx_chain.head); + } + + /** + * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains + * @card: card structure + * + * fills all descriptors in the rx chain: allocates skbs + * and iommu-maps them. 
+ * returns 0 on success, < 0 on failure + */ + static int gelic_card_fill_rx_chain(struct gelic_card *card) + { + struct gelic_descr *descr = card->rx_chain.head; + int ret; + + do { + if (!descr->skb) { + ret = gelic_descr_prepare_rx(card, descr); + if (ret) + goto rewind; + } + descr = descr->next; + } while (descr != card->rx_chain.head); + + return 0; + rewind: + gelic_card_release_rx_chain(card); + return ret; + } + + /** + * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains + * @card: card structure + * + * returns 0 on success, < 0 on failure + */ + static int __devinit gelic_card_alloc_rx_skbs(struct gelic_card *card) + { + struct gelic_descr_chain *chain; + int ret; + chain = &card->rx_chain; + ret = gelic_card_fill_rx_chain(card); + chain->tail = card->rx_top->prev; /* point to the last */ + return ret; + } + + /** + * gelic_descr_release_tx - processes a used tx descriptor + * @card: card structure + * @descr: descriptor to release + * + * releases a used tx descriptor (unmapping, freeing of skb) + */ + static void gelic_descr_release_tx(struct gelic_card *card, + struct gelic_descr *descr) + { + struct sk_buff *skb = descr->skb; + + BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL)); + + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + + descr->buf_addr = 0; + descr->buf_size = 0; + descr->next_descr_addr = 0; + descr->result_size = 0; + descr->valid_size = 0; + descr->data_status = 0; + descr->data_error = 0; + descr->skb = NULL; + + /* set descr status */ + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + } + + static void gelic_card_stop_queues(struct gelic_card *card) + { + netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET_0]); + + if (card->netdev[GELIC_PORT_WIRELESS]) + netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]); + } + static void gelic_card_wake_queues(struct gelic_card *card) + { + netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET_0]); + + if (card->netdev[GELIC_PORT_WIRELESS]) + netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]); + } + /** + * gelic_card_release_tx_chain - processes sent tx descriptors + * @card: adapter structure + * @stop: net_stop sequence + * + * releases the tx descriptors that gelic has finished with + */ + static void gelic_card_release_tx_chain(struct gelic_card *card, int stop) + { + struct gelic_descr_chain *tx_chain; + enum gelic_descr_dma_status status; + struct net_device *netdev; + int release = 0; + + for (tx_chain = &card->tx_chain; + tx_chain->head != tx_chain->tail && tx_chain->tail; + tx_chain->tail = tx_chain->tail->next) { + status = gelic_descr_get_status(tx_chain->tail); + netdev = tx_chain->tail->skb->dev; + switch (status) { + case GELIC_DESCR_DMA_RESPONSE_ERROR: + case GELIC_DESCR_DMA_PROTECTION_ERROR: + case GELIC_DESCR_DMA_FORCE_END: + if (printk_ratelimit()) + dev_info(ctodev(card), + "%s: forcing end of tx descriptor " \ + "with status %x\n", + __func__, status); + netdev->stats.tx_dropped++; + break; + + case GELIC_DESCR_DMA_COMPLETE: + if (tx_chain->tail->skb) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += + tx_chain->tail->skb->len; + } + break; + + case GELIC_DESCR_DMA_CARDOWNED: + /* pending tx request */ + default: + /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */ + if (!stop) + goto out; + } + gelic_descr_release_tx(card, tx_chain->tail); + release ++; + } + out: + if (!stop && release) + gelic_card_wake_queues(card); + } + + /** + * gelic_net_set_multi - sets multicast 
addresses and promisc flags + * @netdev: interface device structure + * + * gelic_net_set_multi configures multicast addresses as needed for the + * netdev interface. It also sets up multicast, allmulti and promisc + * flags appropriately + */ + void gelic_net_set_multi(struct net_device *netdev) + { + struct gelic_card *card = netdev_card(netdev); + struct netdev_hw_addr *ha; + unsigned int i; + uint8_t *p; + u64 addr; + int status; + + /* clear all multicast address */ + status = lv1_net_remove_multicast_address(bus_id(card), dev_id(card), + 0, 1); + if (status) + dev_err(ctodev(card), + "lv1_net_remove_multicast_address failed %d\n", + status); + /* set broadcast address */ + status = lv1_net_add_multicast_address(bus_id(card), dev_id(card), + GELIC_NET_BROADCAST_ADDR, 0); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) { + status = lv1_net_add_multicast_address(bus_id(card), + dev_id(card), + 0, 1); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + return; + } + + /* set multicast addresses */ + netdev_for_each_mc_addr(ha, netdev) { + addr = 0; + p = ha->addr; + for (i = 0; i < ETH_ALEN; i++) { + addr <<= 8; + addr |= *p++; + } + status = lv1_net_add_multicast_address(bus_id(card), + dev_id(card), + addr, 0); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + } + } + + /** + * gelic_card_enable_rxdmac - enables the receive DMA controller + * @card: card structure + * + * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN + * in the GDADMACCNTR register + */ + static inline void gelic_card_enable_rxdmac(struct gelic_card *card) + { + int status; + + #ifdef DEBUG + if (gelic_descr_get_status(card->rx_chain.head) != + GELIC_DESCR_DMA_CARDOWNED) { + printk(KERN_ERR "%s: status=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); + printk(KERN_ERR "%s: nextphy=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->next_descr_addr)); + printk(KERN_ERR "%s: head=%p\n", __func__, + card->rx_chain.head); + } + #endif + status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), + card->rx_chain.head->bus_addr, 0); + if (status) + dev_info(ctodev(card), + "lv1_net_start_rx_dma failed, status=%d\n", status); + } + + /** + * gelic_card_disable_rxdmac - disables the receive DMA controller + * @card: card structure + * + * gelic_card_disable_rxdmac terminates processing on the DMA controller by + * turing off DMA and issuing a force end + */ + static inline void gelic_card_disable_rxdmac(struct gelic_card *card) + { + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_rx_dma failed, %d\n", status); + } + + /** + * gelic_card_disable_txdmac - disables the transmit DMA controller + * @card: card structure + * + * gelic_card_disable_txdmac terminates processing on the DMA controller by + * turing off DMA and issuing a force end + */ + static inline void gelic_card_disable_txdmac(struct gelic_card *card) + { + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_tx_dma failed, status=%d\n", status); + } + + /** + * gelic_net_stop - called upon 
ifconfig down + * @netdev: interface device structure + * + * always returns 0 + */ + int gelic_net_stop(struct net_device *netdev) + { + struct gelic_card *card; + + pr_debug("%s: start\n", __func__); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + card = netdev_card(netdev); + gelic_card_down(card); + + pr_debug("%s: done\n", __func__); + return 0; + } + + /** + * gelic_card_get_next_tx_descr - returns the next available tx descriptor + * @card: device structure to get descriptor from + * + * returns the address of the next descriptor, or NULL if not available. + */ + static struct gelic_descr * + gelic_card_get_next_tx_descr(struct gelic_card *card) + { + if (!card->tx_chain.head) + return NULL; + /* see if the next descriptor is free */ + if (card->tx_chain.tail != card->tx_chain.head->next && + gelic_descr_get_status(card->tx_chain.head) == + GELIC_DESCR_DMA_NOT_IN_USE) + return card->tx_chain.head; + else + return NULL; + + } + + /** + * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field + * @descr: descriptor structure to fill out + * @skb: packet to consider + * + * fills out the command and status field of the descriptor structure, + * depending on hardware checksum settings. This function assumes a wmb() + * has executed before. + */ + static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr, + struct sk_buff *skb) + { + if (skb->ip_summed != CHECKSUM_PARTIAL) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + else { + /* is packet ip? + * if yes: tcp? udp? */ + if (skb->protocol == htons(ETH_P_IP)) { + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + else /* + * the stack should checksum non-tcp and non-udp + * packets on his own: NETIF_F_IP_CSUM + */ + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + } + } + } + + static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, + unsigned short tag) + { + struct vlan_ethhdr *veth; + static unsigned int c; + + if (skb_headroom(skb) < VLAN_HLEN) { + struct sk_buff *sk_tmp = skb; + pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c); + skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN); + if (!skb) + return NULL; + dev_kfree_skb_any(sk_tmp); + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the top of buffer */ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + + veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q); + veth->h_vlan_TCI = htons(tag); + + return skb; + } + + /** + * gelic_descr_prepare_tx - setup a descriptor for sending packets + * @card: card structure + * @descr: descriptor structure + * @skb: packet to use + * + * returns 0 on success, <0 on failure. 
+ * + */ + static int gelic_descr_prepare_tx(struct gelic_card *card, + struct gelic_descr *descr, + struct sk_buff *skb) + { + dma_addr_t buf; + + if (card->vlan_required) { + struct sk_buff *skb_tmp; + enum gelic_port_type type; + + type = netdev_port(skb->dev)->type; + skb_tmp = gelic_put_vlan_tag(skb, + card->vlan[type].tx); + if (!skb_tmp) + return -ENOMEM; + skb = skb_tmp; + } + + buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); + + if (!buf) { + dev_err(ctodev(card), + "dma map 2 failed (%p, %i). Dropping packet\n", + skb->data, skb->len); + return -ENOMEM; + } + + descr->buf_addr = cpu_to_be32(buf); + descr->buf_size = cpu_to_be32(skb->len); + descr->skb = skb; + descr->data_status = 0; + descr->next_descr_addr = 0; /* terminate hw descr */ + gelic_descr_set_tx_cmdstat(descr, skb); + + /* bump free descriptor pointer */ + card->tx_chain.head = descr->next; + return 0; + } + + /** + * gelic_card_kick_txdma - enables TX DMA processing + * @card: card structure + * @descr: descriptor address to enable TX processing at + * + */ + static int gelic_card_kick_txdma(struct gelic_card *card, + struct gelic_descr *descr) + { + int status = 0; + + if (card->tx_dma_progress) + return 0; + + if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) { + card->tx_dma_progress = 1; + status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), + descr->bus_addr, 0); + if (status) { + card->tx_dma_progress = 0; + dev_info(ctodev(card), "lv1_net_start_txdma failed," \ + "status=%d\n", status); + } + } + return status; + } + + /** + * gelic_net_xmit - transmits a frame over the device + * @skb: packet to send out + * @netdev: interface device structure + * + * returns 0 on success, <0 on failure + */ + int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) + { + struct gelic_card *card = netdev_card(netdev); + struct gelic_descr *descr; + int result; + unsigned long flags; + + spin_lock_irqsave(&card->tx_lock, flags); + + gelic_card_release_tx_chain(card, 0); + + descr = gelic_card_get_next_tx_descr(card); + if (!descr) { + /* + * no more descriptors free + */ + gelic_card_stop_queues(card); + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + result = gelic_descr_prepare_tx(card, descr, skb); + if (result) { + /* + * DMA map failed. As chances are that failure + * would continue, just release skb and return + */ + netdev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_OK; + } + /* + * link this prepared descriptor to previous one + * to achieve high performance + */ + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); + /* + * as hardware descriptor is modified in the above lines, + * ensure that the hardware sees it + */ + wmb(); + if (gelic_card_kick_txdma(card, descr)) { + /* + * kick failed. 
+ * release descriptor which was just prepared + */ + netdev->stats.tx_dropped++; + /* don't trigger BUG_ON() in gelic_descr_release_tx */ + descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL); + gelic_descr_release_tx(card, descr); + /* reset head */ + card->tx_chain.head = descr; + /* reset hw termination */ + descr->prev->next_descr_addr = 0; + dev_info(ctodev(card), "%s: kick failure\n", __func__); + } + + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_OK; + } + + /** + * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on + * @descr: descriptor to process + * @card: card structure + * @netdev: net_device structure to be passed packet + * + * iommu-unmaps the skb, fills out skb structure and passes the data to the + * stack. The descriptor state is not changed. + */ + static void gelic_net_pass_skb_up(struct gelic_descr *descr, + struct gelic_card *card, + struct net_device *netdev) + + { + struct sk_buff *skb = descr->skb; + u32 data_status, data_error; + + data_status = be32_to_cpu(descr->data_status); + data_error = be32_to_cpu(descr->data_error); + /* unmap skb buffer */ + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), + GELIC_NET_MAX_MTU, + DMA_FROM_DEVICE); + + skb_put(skb, be32_to_cpu(descr->valid_size)? + be32_to_cpu(descr->valid_size) : + be32_to_cpu(descr->result_size)); + if (!descr->valid_size) + dev_info(ctodev(card), "buffer full %x %x %x\n", + be32_to_cpu(descr->result_size), + be32_to_cpu(descr->buf_size), + be32_to_cpu(descr->dmac_cmd_status)); + + descr->skb = NULL; + /* + * the card put 2 bytes vlan tag in front + * of the ethernet frame + */ + skb_pull(skb, 2); + skb->protocol = eth_type_trans(skb, netdev); + + /* checksum offload */ + if (netdev->features & NETIF_F_RXCSUM) { + if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) && + (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + } else + skb_checksum_none_assert(skb); + + /* update netdevice statistics */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* pass skb up to stack */ + netif_receive_skb(skb); + } + + /** + * gelic_card_decode_one_descr - processes an rx descriptor + * @card: card structure + * + * returns 1 if a packet has been sent to the stack, otherwise 0 + * + * processes an rx descriptor by iommu-unmapping the data buffer and passing + * the packet up to the stack + */ + static int gelic_card_decode_one_descr(struct gelic_card *card) + { + enum gelic_descr_dma_status status; + struct gelic_descr_chain *chain = &card->rx_chain; + struct gelic_descr *descr = chain->head; + struct net_device *netdev = NULL; + int dmac_chain_ended; + + status = gelic_descr_get_status(descr); + + if (status == GELIC_DESCR_DMA_CARDOWNED) + return 0; + + if (status == GELIC_DESCR_DMA_NOT_IN_USE) { + dev_dbg(ctodev(card), "dormant descr? 
%p\n", descr); + return 0; + } + + /* netdevice select */ + if (card->vlan_required) { + unsigned int i; + u16 vid; + vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK; + for (i = 0; i < GELIC_PORT_MAX; i++) { + if (card->vlan[i].rx == vid) { + netdev = card->netdev[i]; + break; + } + } + if (GELIC_PORT_MAX <= i) { + pr_info("%s: unknown packet vid=%x\n", __func__, vid); + goto refill; + } + } else + netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + + if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) || + (status == GELIC_DESCR_DMA_PROTECTION_ERROR) || + (status == GELIC_DESCR_DMA_FORCE_END)) { + dev_info(ctodev(card), "dropping RX descriptor with state %x\n", + status); + netdev->stats.rx_dropped++; + goto refill; + } + + if (status == GELIC_DESCR_DMA_BUFFER_FULL) { + /* + * Buffer full would occur if and only if + * the frame length was longer than the size of this + * descriptor's buffer. If the frame length was equal + * to or shorter than buffer'size, FRAME_END condition + * would occur. + * Anyway this frame was longer than the MTU, + * just drop it. + */ + dev_info(ctodev(card), "overlength frame\n"); + goto refill; + } + /* + * descriptors any other than FRAME_END here should + * be treated as error. + */ + if (status != GELIC_DESCR_DMA_FRAME_END) { + dev_dbg(ctodev(card), "RX descriptor with state %x\n", + status); + goto refill; + } + + /* ok, we've got a packet in descr */ + gelic_net_pass_skb_up(descr, card, netdev); + refill: + + /* is the current descriptor terminated with next_descr == NULL? */ + dmac_chain_ended = + be32_to_cpu(descr->dmac_cmd_status) & + GELIC_DESCR_RX_DMA_CHAIN_END; + /* + * So that always DMAC can see the end + * of the descriptor chain to avoid + * from unwanted DMAC overrun. + */ + descr->next_descr_addr = 0; + + /* change the descriptor state: */ + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + + /* + * this call can fail, but for now, just leave this + * decriptor without skb + */ + gelic_descr_prepare_rx(card, descr); + + chain->tail = descr; + chain->head = descr->next; + + /* + * Set this descriptor the end of the chain. + */ + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); + + /* + * If dmac chain was met, DMAC stopped. 
+ * thus re-enable it + */ + + if (dmac_chain_ended) + gelic_card_enable_rxdmac(card); + + return 1; + } + + /** + * gelic_net_poll - NAPI poll function called by the stack to return packets + * @napi: napi structure + * @budget: number of packets we can pass to the stack at most + * + * returns the number of the processed packets + * + */ + static int gelic_net_poll(struct napi_struct *napi, int budget) + { + struct gelic_card *card = container_of(napi, struct gelic_card, napi); + int packets_done = 0; + + while (packets_done < budget) { + if (!gelic_card_decode_one_descr(card)) + break; + + packets_done++; + } + + if (packets_done < budget) { + napi_complete(napi); + gelic_card_rx_irq_on(card); + } + return packets_done; + } + /** + * gelic_net_change_mtu - changes the MTU of an interface + * @netdev: interface device structure + * @new_mtu: new MTU value + * + * returns 0 on success, <0 on failure + */ + int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) + { + /* no need to re-alloc skbs or so -- the max mtu is about 2.3k + * and mtu is outbound only anyway */ + if ((new_mtu < GELIC_NET_MIN_MTU) || + (new_mtu > GELIC_NET_MAX_MTU)) { + return -EINVAL; + } + netdev->mtu = new_mtu; + return 0; + } + + /** + * gelic_card_interrupt - event handler for gelic_net + */ + static irqreturn_t gelic_card_interrupt(int irq, void *ptr) + { + unsigned long flags; + struct gelic_card *card = ptr; + u64 status; + + status = card->irq_status; + + if (!status) + return IRQ_NONE; + + status &= card->irq_mask; + + if (status & GELIC_CARD_RXINT) { + gelic_card_rx_irq_off(card); + napi_schedule(&card->napi); + } + + if (status & GELIC_CARD_TXINT) { + spin_lock_irqsave(&card->tx_lock, flags); + card->tx_dma_progress = 0; + gelic_card_release_tx_chain(card, 0); + /* kick outstanding tx descriptor if any */ + gelic_card_kick_txdma(card, card->tx_chain.tail); + spin_unlock_irqrestore(&card->tx_lock, flags); + } + + /* ether port status changed */ + if (status & GELIC_CARD_PORT_STATUS_CHANGED) + gelic_card_get_ether_port_status(card, 1); + + #ifdef CONFIG_GELIC_WIRELESS + if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED)) + gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status); + #endif + + return IRQ_HANDLED; + } + + #ifdef CONFIG_NET_POLL_CONTROLLER + /** + * gelic_net_poll_controller - artificial interrupt for netconsole etc. 
+ * @netdev: interface device structure + * + * see Documentation/networking/netconsole.txt + */ + void gelic_net_poll_controller(struct net_device *netdev) + { + struct gelic_card *card = netdev_card(netdev); + + gelic_card_set_irq_mask(card, 0); + gelic_card_interrupt(netdev->irq, netdev); + gelic_card_set_irq_mask(card, card->irq_mask); + } + #endif /* CONFIG_NET_POLL_CONTROLLER */ + + /** + * gelic_net_open - called upon ifconfig up + * @netdev: interface device structure + * + * returns 0 on success, <0 on failure + * + * gelic_net_open allocates all the descriptors and memory needed for + * operation, sets up multicast list and enables interrupts + */ + int gelic_net_open(struct net_device *netdev) + { + struct gelic_card *card = netdev_card(netdev); + + dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev); + + gelic_card_up(card); + + netif_start_queue(netdev); + gelic_card_get_ether_port_status(card, 1); + + dev_dbg(ctodev(card), " <- %s\n", __func__); + return 0; + } + + void gelic_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) + { + strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); + strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); + } + + static int gelic_ether_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) + { + struct gelic_card *card = netdev_card(netdev); + + gelic_card_get_ether_port_status(card, 0); + + if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { + case GELIC_LV1_ETHER_SPEED_10: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + case GELIC_LV1_ETHER_SPEED_100: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + case GELIC_LV1_ETHER_SPEED_1000: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + default: + pr_info("%s: speed unknown\n", __func__); + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + } + + cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + cmd->advertising = cmd->supported; + if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) { + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising &= ~ADVERTISED_Autoneg; + } + cmd->port = PORT_TP; + + return 0; + } + + static int gelic_ether_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) + { + struct gelic_card *card = netdev_card(netdev); + u64 mode; + int ret; + + if (cmd->autoneg == AUTONEG_ENABLE) { + mode = GELIC_LV1_ETHER_AUTO_NEG; + } else { + switch (cmd->speed) { + case SPEED_10: + mode = GELIC_LV1_ETHER_SPEED_10; + break; + case SPEED_100: + mode = GELIC_LV1_ETHER_SPEED_100; + break; + case SPEED_1000: + mode = GELIC_LV1_ETHER_SPEED_1000; + break; + default: + return -EINVAL; + } + if (cmd->duplex == DUPLEX_FULL) + mode |= GELIC_LV1_ETHER_FULL_DUPLEX; + else if (cmd->speed == SPEED_1000) { + pr_info("1000 half duplex is not supported.\n"); + return -EINVAL; + } + } + + ret = gelic_card_set_link_mode(card, mode); + + if (ret) + return ret; + + return 0; + } + + static void gelic_net_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + if (0 <= ps3_compare_firmware_version(2, 2, 0)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + + wol->wolopts = ps3_sys_manager_get_wol() ? 
wol->supported : 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + } + static int gelic_net_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) + { + int status; + struct gelic_card *card; + u64 v1, v2; + + if (ps3_compare_firmware_version(2, 2, 0) < 0 || + !capable(CAP_NET_ADMIN)) + return -EPERM; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + card = netdev_card(netdev); + if (wol->wolopts & WAKE_MAGIC) { + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_MAGIC_PACKET, + 0, GELIC_LV1_WOL_MP_ENABLE, + &v1, &v2); + if (status) { + pr_info("%s: enabling WOL failed %d\n", __func__, + status); + status = -EIO; + goto done; + } + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_ADD_MATCH_ADDR, + 0, GELIC_LV1_WOL_MATCH_ALL, + &v1, &v2); + if (!status) + ps3_sys_manager_set_wol(1); + else { + pr_info("%s: enabling WOL filter failed %d\n", + __func__, status); + status = -EIO; + } + } else { + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_MAGIC_PACKET, + 0, GELIC_LV1_WOL_MP_DISABLE, + &v1, &v2); + if (status) { + pr_info("%s: disabling WOL failed %d\n", __func__, + status); + status = -EIO; + goto done; + } + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_DELETE_MATCH_ADDR, + 0, GELIC_LV1_WOL_MATCH_ALL, + &v1, &v2); + if (!status) + ps3_sys_manager_set_wol(0); + else { + pr_info("%s: removing WOL filter failed %d\n", + __func__, status); + status = -EIO; + } + } + done: + return status; + } + + static const struct ethtool_ops gelic_ether_ethtool_ops = { + .get_drvinfo = gelic_net_get_drvinfo, + .get_settings = gelic_ether_get_settings, + .set_settings = gelic_ether_set_settings, + .get_link = ethtool_op_get_link, + .get_wol = gelic_net_get_wol, + .set_wol = gelic_net_set_wol, + }; + + /** + * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout + * function (to be called not under interrupt status) + * @work: work is context of tx timout task + * + * called as task when tx hangs, resets interface (if interface is up) + */ + static void gelic_net_tx_timeout_task(struct work_struct *work) + { + struct gelic_card *card = + container_of(work, struct gelic_card, tx_timeout_task); + struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + + dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__); + + if (!(netdev->flags & IFF_UP)) + goto out; + + netif_device_detach(netdev); + gelic_net_stop(netdev); + + gelic_net_open(netdev); + netif_device_attach(netdev); + + out: + atomic_dec(&card->tx_timeout_task_counter); + } + + /** + * gelic_net_tx_timeout - called when the tx timeout watchdog kicks in. + * @netdev: interface device structure + * + * called, if tx hangs. 
Schedules a task that resets the interface + */ + void gelic_net_tx_timeout(struct net_device *netdev) + { + struct gelic_card *card; + + card = netdev_card(netdev); + atomic_inc(&card->tx_timeout_task_counter); + if (netdev->flags & IFF_UP) + schedule_work(&card->tx_timeout_task); + else + atomic_dec(&card->tx_timeout_task_counter); + } + + static const struct net_device_ops gelic_netdevice_ops = { + .ndo_open = gelic_net_open, + .ndo_stop = gelic_net_stop, + .ndo_start_xmit = gelic_net_xmit, + .ndo_set_rx_mode = gelic_net_set_multi, + .ndo_change_mtu = gelic_net_change_mtu, + .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gelic_net_poll_controller, + #endif + }; + + /** + * gelic_ether_setup_netdev_ops - initialization of net_device operations + * @netdev: net_device structure + * + * fills out function pointers in the net_device structure + */ + static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev, + struct napi_struct *napi) + { + netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; + /* NAPI */ + netif_napi_add(netdev, napi, + gelic_net_poll, GELIC_NET_NAPI_WEIGHT); + netdev->ethtool_ops = &gelic_ether_ethtool_ops; + netdev->netdev_ops = &gelic_netdevice_ops; + } + + /** + * gelic_ether_setup_netdev - initialization of net_device + * @netdev: net_device structure + * @card: card structure + * + * Returns 0 on success or <0 on failure + * + * gelic_ether_setup_netdev initializes the net_device structure + * and register it. + **/ + int __devinit gelic_net_setup_netdev(struct net_device *netdev, + struct gelic_card *card) + { + int status; + u64 v1, v2; + + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + netdev->features = NETIF_F_IP_CSUM; + if (GELIC_CARD_RX_CSUM_DEFAULT) + netdev->features |= NETIF_F_RXCSUM; + + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_MAC_ADDRESS, + 0, 0, 0, &v1, &v2); + v1 <<= 16; + if (status || !is_valid_ether_addr((u8 *)&v1)) { + dev_info(ctodev(card), + "%s:lv1_net_control GET_MAC_ADDR failed %d\n", + __func__, status); + return -EINVAL; + } + memcpy(netdev->dev_addr, &v1, ETH_ALEN); + + if (card->vlan_required) { + netdev->hard_header_len += VLAN_HLEN; + /* + * As vlan is internally used, + * we can not receive vlan packets + */ + netdev->features |= NETIF_F_VLAN_CHALLENGED; + } + + status = register_netdev(netdev); + if (status) { + dev_err(ctodev(card), "%s:Couldn't register %s %d\n", + __func__, netdev->name, status); + return status; + } + dev_info(ctodev(card), "%s: MAC addr %pM\n", + netdev->name, netdev->dev_addr); + + return 0; + } + + /** + * gelic_alloc_card_net - allocates net_device and card structure + * + * returns the card structure or NULL in case of errors + * + * the card and net_device structures are linked to each other + */ + #define GELIC_ALIGN (32) + static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **netdev) + { + struct gelic_card *card; + struct gelic_port *port; + void *p; + size_t alloc_size; + /* + * gelic requires dma descriptor is 32 bytes aligned and + * the hypervisor requires irq_status is 8 bytes aligned. 
+ */ + BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8); + BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32); + alloc_size = + sizeof(struct gelic_card) + + sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS + + sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS + + GELIC_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) + return NULL; + card = PTR_ALIGN(p, GELIC_ALIGN); + card->unalign = p; + + /* + * alloc netdev + */ + *netdev = alloc_etherdev(sizeof(struct gelic_port)); + if (!netdev) { + kfree(card->unalign); + return NULL; + } + port = netdev_priv(*netdev); + + /* gelic_port */ + port->netdev = *netdev; + port->card = card; + port->type = GELIC_PORT_ETHERNET_0; + + /* gelic_card */ + card->netdev[GELIC_PORT_ETHERNET_0] = *netdev; + + INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); + init_waitqueue_head(&card->waitq); + atomic_set(&card->tx_timeout_task_counter, 0); + mutex_init(&card->updown_lock); + atomic_set(&card->users, 0); + + return card; + } + + static void __devinit gelic_card_get_vlan_info(struct gelic_card *card) + { + u64 v1, v2; + int status; + unsigned int i; + struct { + int tx; + int rx; + } vlan_id_ix[2] = { + [GELIC_PORT_ETHERNET_0] = { + .tx = GELIC_LV1_VLAN_TX_ETHERNET_0, + .rx = GELIC_LV1_VLAN_RX_ETHERNET_0 + }, + [GELIC_PORT_WIRELESS] = { + .tx = GELIC_LV1_VLAN_TX_WIRELESS, + .rx = GELIC_LV1_VLAN_RX_WIRELESS + } + }; + + for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) { + /* tx tag */ + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_VLAN_ID, + vlan_id_ix[i].tx, + 0, 0, &v1, &v2); + if (status || !v1) { + if (status != LV1_NO_ENTRY) + dev_dbg(ctodev(card), + "get vlan id for tx(%d) failed(%d)\n", + vlan_id_ix[i].tx, status); + card->vlan[i].tx = 0; + card->vlan[i].rx = 0; + continue; + } + card->vlan[i].tx = (u16)v1; + + /* rx tag */ + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_VLAN_ID, + vlan_id_ix[i].rx, + 0, 0, &v1, &v2); + if (status || !v1) { + if (status != LV1_NO_ENTRY) + dev_info(ctodev(card), + "get vlan id for rx(%d) failed(%d)\n", + vlan_id_ix[i].rx, status); + card->vlan[i].tx = 0; + card->vlan[i].rx = 0; + continue; + } + card->vlan[i].rx = (u16)v1; + + dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n", + i, card->vlan[i].tx, card->vlan[i].rx); + } + + if (card->vlan[GELIC_PORT_ETHERNET_0].tx) { + BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx); + card->vlan_required = 1; + } else + card->vlan_required = 0; + + /* check wirelss capable firmware */ + if (ps3_compare_firmware_version(1, 6, 0) < 0) { + card->vlan[GELIC_PORT_WIRELESS].tx = 0; + card->vlan[GELIC_PORT_WIRELESS].rx = 0; + } + + dev_info(ctodev(card), "internal vlan %s\n", + card->vlan_required? 
"enabled" : "disabled"); + } + /** + * ps3_gelic_driver_probe - add a device to the control of this driver + */ + static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) + { + struct gelic_card *card; + struct net_device *netdev; + int result; + + pr_debug("%s: called\n", __func__); ++ ++ udbg_shutdown_ps3gelic(); ++ + result = ps3_open_hv_device(dev); + + if (result) { + dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n", + __func__); + goto fail_open; + } + + result = ps3_dma_region_create(dev->d_region); + + if (result) { + dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n", + __func__, result); + BUG_ON("check region type"); + goto fail_dma_region; + } + + /* alloc card/netdevice */ + card = gelic_alloc_card_net(&netdev); + if (!card) { + dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n", + __func__); + result = -ENOMEM; + goto fail_alloc_card; + } + ps3_system_bus_set_drvdata(dev, card); + card->dev = dev; + + /* get internal vlan info */ + gelic_card_get_vlan_info(card); + + card->link_mode = GELIC_LV1_ETHER_AUTO_NEG; + + /* setup interrupt */ + result = lv1_net_set_interrupt_status_indicator(bus_id(card), + dev_id(card), + ps3_mm_phys_to_lpar(__pa(&card->irq_status)), + 0); + + if (result) { + dev_dbg(&dev->core, + "%s:set_interrupt_status_indicator failed: %s\n", + __func__, ps3_result(result)); + result = -EIO; + goto fail_status_indicator; + } + + result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY, + &card->irq); + + if (result) { + dev_info(ctodev(card), + "%s:gelic_net_open_device failed (%d)\n", + __func__, result); + result = -EPERM; + goto fail_alloc_irq; + } + result = request_irq(card->irq, gelic_card_interrupt, + IRQF_DISABLED, netdev->name, card); + + if (result) { + dev_info(ctodev(card), "%s:request_irq failed (%d)\n", + __func__, result); + goto fail_request_irq; + } + + /* setup card structure */ + card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT | + GELIC_CARD_PORT_STATUS_CHANGED; + + + if (gelic_card_init_chain(card, &card->tx_chain, + card->descr, GELIC_NET_TX_DESCRIPTORS)) + goto fail_alloc_tx; + if (gelic_card_init_chain(card, &card->rx_chain, + card->descr + GELIC_NET_TX_DESCRIPTORS, + GELIC_NET_RX_DESCRIPTORS)) + goto fail_alloc_rx; + + /* head of chain */ + card->tx_top = card->tx_chain.head; + card->rx_top = card->rx_chain.head; + dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n", + card->rx_top, card->tx_top, sizeof(struct gelic_descr), + GELIC_NET_RX_DESCRIPTORS); + /* allocate rx skbs */ + if (gelic_card_alloc_rx_skbs(card)) + goto fail_alloc_skbs; + + spin_lock_init(&card->tx_lock); + card->tx_dma_progress = 0; + + /* setup net_device structure */ + netdev->irq = card->irq; + SET_NETDEV_DEV(netdev, &card->dev->core); + gelic_ether_setup_netdev_ops(netdev, &card->napi); + result = gelic_net_setup_netdev(netdev, card); + if (result) { + dev_dbg(&dev->core, "%s: setup_netdev failed %d", + __func__, result); + goto fail_setup_netdev; + } + + #ifdef CONFIG_GELIC_WIRELESS + if (gelic_wl_driver_probe(card)) { + dev_dbg(&dev->core, "%s: WL init failed\n", __func__); + goto fail_setup_netdev; + } + #endif + pr_debug("%s: done\n", __func__); + return 0; + + fail_setup_netdev: + fail_alloc_skbs: + gelic_card_free_chain(card, card->rx_chain.head); + fail_alloc_rx: + gelic_card_free_chain(card, card->tx_chain.head); + fail_alloc_tx: + free_irq(card->irq, card); + netdev->irq = NO_IRQ; + fail_request_irq: + ps3_sb_event_receive_port_destroy(dev, card->irq); + fail_alloc_irq: + 
lv1_net_set_interrupt_status_indicator(bus_id(card), + bus_id(card), + 0, 0); + fail_status_indicator: + ps3_system_bus_set_drvdata(dev, NULL); + kfree(netdev_card(netdev)->unalign); + free_netdev(netdev); + fail_alloc_card: + ps3_dma_region_free(dev->d_region); + fail_dma_region: + ps3_close_hv_device(dev); + fail_open: + return result; + } + + /** + * ps3_gelic_driver_remove - remove a device from the control of this driver + */ + + static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev) + { + struct gelic_card *card = ps3_system_bus_get_drvdata(dev); + struct net_device *netdev0; + pr_debug("%s: called\n", __func__); + + /* set auto-negotiation */ + gelic_card_set_link_mode(card, GELIC_LV1_ETHER_AUTO_NEG); + + #ifdef CONFIG_GELIC_WIRELESS + gelic_wl_driver_remove(card); + #endif + /* stop interrupt */ + gelic_card_set_irq_mask(card, 0); + + /* turn off DMA, force end */ + gelic_card_disable_rxdmac(card); + gelic_card_disable_txdmac(card); + + /* release chains */ + gelic_card_release_tx_chain(card, 1); + gelic_card_release_rx_chain(card); + + gelic_card_free_chain(card, card->tx_top); + gelic_card_free_chain(card, card->rx_top); + + netdev0 = card->netdev[GELIC_PORT_ETHERNET_0]; + /* disconnect event port */ + free_irq(card->irq, card); + netdev0->irq = NO_IRQ; + ps3_sb_event_receive_port_destroy(card->dev, card->irq); + + wait_event(card->waitq, + atomic_read(&card->tx_timeout_task_counter) == 0); + + lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card), + 0 , 0); + + unregister_netdev(netdev0); + kfree(netdev_card(netdev0)->unalign); + free_netdev(netdev0); + + ps3_system_bus_set_drvdata(dev, NULL); + + ps3_dma_region_free(dev->d_region); + + ps3_close_hv_device(dev); + + pr_debug("%s: done\n", __func__); + return 0; + } + + static struct ps3_system_bus_driver ps3_gelic_driver = { + .match_id = PS3_MATCH_ID_GELIC, + .probe = ps3_gelic_driver_probe, + .remove = ps3_gelic_driver_remove, + .shutdown = ps3_gelic_driver_remove, + .core.name = "ps3_gelic_driver", + .core.owner = THIS_MODULE, + }; + + static int __init ps3_gelic_driver_init (void) + { + return firmware_has_feature(FW_FEATURE_PS3_LV1) + ? ps3_system_bus_driver_register(&ps3_gelic_driver) + : -ENODEV; + } + + static void __exit ps3_gelic_driver_exit (void) + { + ps3_system_bus_driver_unregister(&ps3_gelic_driver); + } + + module_init(ps3_gelic_driver_init); + module_exit(ps3_gelic_driver_exit); + + MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC); + diff --combined drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6a,d3fadfb..a93df6a --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@@ -359,12 -359,6 +359,12 @@@ static inline void *port_priv(struct ge return port->priv; }
+#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC +extern void udbg_shutdown_ps3gelic(void); +#else +static inline void udbg_shutdown_ps3gelic(void) {} +#endif + extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); /* shared netdev ops */ extern void gelic_card_up(struct gelic_card *card); diff --combined drivers/net/ppp/pptp.c index 89f829f,eae542a..89f829f --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@@ -285,10 -285,8 +285,10 @@@ static int pptp_xmit(struct ppp_channe ip_send_check(iph);
ip_local_out(skb); + return 1;
tx_error: + kfree_skb(skb); return 1; }
@@@ -307,18 -305,11 +307,18 @@@ static int pptp_rcv_core(struct sock *s }
header = (struct pptp_gre_header *)(skb->data); + headersize = sizeof(*header);
/* test if acknowledgement present */ if (PPTP_GRE_IS_A(header->ver)) { - __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? - header->ack : header->seq; /* ack in different place if S = 0 */ + __u32 ack; + + if (!pskb_may_pull(skb, headersize)) + goto drop; + header = (struct pptp_gre_header *)(skb->data); + + /* ack in different place if S = 0 */ + ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
ack = ntohl(ack);
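A minimal, self-contained sketch of the pskb_may_pull() pattern used by the pptp_rcv_core hunks above (validate the header length before dereferencing, then re-read the data pointer); struct my_hdr and parse_my_hdr() are hypothetical names, not part of the commit:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical header layout, standing in for the real GRE header. */
struct my_hdr {
	__be16 flags;
	__be32 seq;
} __packed;

static int parse_my_hdr(struct sk_buff *skb)
{
	const struct my_hdr *h;

	/* Make sure sizeof(*h) bytes are present in the linear area. */
	if (!pskb_may_pull(skb, sizeof(*h)))
		return -EINVAL;

	/* Re-read skb->data: pskb_may_pull() may reallocate the head. */
	h = (const struct my_hdr *)skb->data;

	/* The fields of *h are now safe to dereference. */
	return h->flags ? 0 : -EPROTO;
}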
@@@ -327,18 -318,21 +327,18 @@@ /* also handle sequence number wrap-around */ if (WRAPPED(ack, opt->ack_recv)) opt->ack_recv = ack; + } else { + headersize -= sizeof(header->ack); } - /* test if payload present */ if (!PPTP_GRE_IS_S(header->flags)) goto drop;
payload_len = ntohs(header->payload_len); seq = ntohl(header->seq);
- /* no ack present? */ - if (!PPTP_GRE_IS_A(header->ver)) - headersize -= sizeof(header->ack); /* check for incomplete packet (length smaller than expected) */ - if (skb->len - headersize < payload_len) + if (!pskb_may_pull(skb, headersize + payload_len)) goto drop;
payload = skb->data + headersize; diff --combined drivers/s390/cio/qdio.h index 903544f,3dd8644..498a4cd --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@@ -18,6 -18,14 +18,6 @@@ #define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ #define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
-/* - * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait - * till next initiative to give transmitted skbs back to the stack is too long. - * Therefore polling is started in case of multicast queue is filled more - * than 50 percent. - */ -#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */ - enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, QDIO_IRQ_STATE_ESTABLISHED, @@@ -36,6 -44,7 +36,7 @@@ #define SLSB_STATE_NOT_INIT 0x0 #define SLSB_STATE_EMPTY 0x1 #define SLSB_STATE_PRIMED 0x2 + #define SLSB_STATE_PENDING 0x3 #define SLSB_STATE_HALTED 0xe #define SLSB_STATE_ERROR 0xf #define SLSB_TYPE_INPUT 0x0 @@@ -59,6 -68,8 +60,8 @@@ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ #define SLSB_P_OUTPUT_EMPTY \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ + #define SLSB_P_OUTPUT_PENDING \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */ #define SLSB_CU_OUTPUT_PRIMED \ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ #define SLSB_P_OUTPUT_HALTED \ @@@ -76,19 -87,11 +79,11 @@@ #define CHSC_FLAG_QDIO_CAPABILITY 0x80 #define CHSC_FLAG_VALIDITY 0x40
- /* qdio adapter-characteristics-1 flag */ - #define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ - #define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ - #define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ - #define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ - #define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ - #define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ - #define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ - /* SIGA flags */ #define QDIO_SIGA_WRITE 0x00 #define QDIO_SIGA_READ 0x01 #define QDIO_SIGA_SYNC 0x02 + #define QDIO_SIGA_WRITEQ 0x04 #define QDIO_SIGA_QEBSM_FLAG 0x80
#ifdef CONFIG_64BIT @@@ -245,6 -248,12 +240,12 @@@ struct qdio_input_q struct qdio_output_q { /* PCIs are enabled for the queue */ int pci_out_enabled; + /* cq: use asynchronous output buffers */ + int use_cq; + /* cq: aobs used for particular SBAL */ + struct qaob **aobs; + /* cq: sbal state related to asynchronous operation */ + struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; /* used SBALs before tasklet schedule */ @@@ -281,9 -290,6 +282,9 @@@ struct qdio_q /* error condition during a data transfer */ unsigned int qdio_error;
+ /* last scan of the queue */ + u64 timestamp; + struct tasklet_struct tasklet; struct qdio_queue_perf_stat q_stats;
@@@ -427,13 -433,22 +428,24 @@@ struct indicator_t
extern struct indicator_t *q_indicators;
- static inline int shared_ind(u32 *dsci) + static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq) + { + return irq->nr_input_qs > 1; + } + + static inline int references_shared_dsci(struct qdio_irq *irq) { - return dsci == &q_indicators[TIQDIO_SHARED_IND].ind; + return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; + } + + static inline int shared_ind(struct qdio_q *q) + { + struct qdio_irq *i = q->irq_ptr; + return references_shared_dsci(i) || has_multiple_inq_on_dsci(i); }
+extern u64 last_ai_time; + /* prototypes for thin interrupt */ void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); @@@ -446,6 -461,7 +458,7 @@@ void tiqdio_free_memory(void) int tiqdio_register_thinints(void); void tiqdio_unregister_thinints(void);
+ /* prototypes for setup */ void qdio_inbound_processing(unsigned long data); void qdio_outbound_processing(unsigned long data); @@@ -466,6 -482,9 +479,9 @@@ int qdio_setup_create_sysfs(struct ccw_ void qdio_setup_destroy_sysfs(struct ccw_device *cdev); int qdio_setup_init(void); void qdio_setup_exit(void); + int qdio_enable_async_operation(struct qdio_output_q *q); + void qdio_disable_async_operation(struct qdio_output_q *q); + struct qaob *qdio_allocate_aob(void);
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); diff --combined drivers/s390/cio/qdio_debug.c index 7cc4a75,aaf7f93..ed68245 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@@ -54,17 -54,15 +54,17 @@@ static int qstat_show(struct seq_file * if (!q) return 0;
- seq_printf(m, "DSCI: %d nr_used: %d\n", - *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); - seq_printf(m, "ftc: %d last_move: %d\n", + seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", + q->timestamp, last_ai_time); + seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n", + atomic_read(&q->nr_buf_used), q->first_to_check, q->last_move); if (q->is_input_q) { seq_printf(m, "polling: %d ack start: %d ack count: %d\n", q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); - seq_printf(m, "IRQs disabled: %u\n", + seq_printf(m, "DSCI: %d IRQs disabled: %u\n", + *(u32 *)q->irq_ptr->dsci, test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)); } @@@ -78,6 -76,9 +78,9 @@@ case SLSB_P_OUTPUT_NOT_INIT: seq_printf(m, "N"); break; + case SLSB_P_OUTPUT_PENDING: + seq_printf(m, "P"); + break; case SLSB_P_INPUT_PRIMED: case SLSB_CU_OUTPUT_PRIMED: seq_printf(m, "+"); diff --combined drivers/s390/cio/qdio_main.c index 293fc89,9a12228..a7ecfa8 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@@ -14,6 -14,8 +14,7 @@@ #include <linux/timer.h> #include <linux/delay.h> #include <linux/gfp.h> + #include <linux/io.h> -#include <linux/kernel_stat.h> #include <linux/atomic.h> #include <asm/debug.h> #include <asm/qdio.h> @@@ -76,11 -78,13 +77,13 @@@ static inline int do_siga_input(unsigne * Note: For IQDC unicast queues only the highest priority queue is processed. */ static inline int do_siga_output(unsigned long schid, unsigned long mask, - unsigned int *bb, unsigned int fc) + unsigned int *bb, unsigned int fc, + unsigned long aob) { register unsigned long __fc asm("0") = fc; register unsigned long __schid asm("1") = schid; register unsigned long __mask asm("2") = mask; + register unsigned long __aob asm("3") = aob; int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
asm volatile( @@@ -89,7 -93,8 +92,8 @@@ " srl %0,28\n" "1:\n" EX_TABLE(0b, 1b) - : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) + : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask), + "+d" (__aob) : : "cc", "memory"); *bb = ((unsigned int) __fc) >> 31; return cc; @@@ -100,12 -105,9 +104,12 @@@ static inline int qdio_check_ccq(struc /* all done or next buffer state different */ if (ccq == 0 || ccq == 32) return 0; - /* not all buffers processed */ - if (ccq == 96 || ccq == 97) + /* no buffer processed */ + if (ccq == 97) return 1; + /* not all buffers processed */ + if (ccq == 96) + return 2; /* notify devices immediately */ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); return -EIO; @@@ -125,8 -127,10 +129,8 @@@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { + int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; unsigned int ccq = 0; - int tmp_count = count, tmp_start = start; - int nr = q->nr; - int rc;
BUG_ON(!q->irq_ptr->sch_token); qperf_inc(q, eqbs); @@@ -137,36 -141,29 +141,36 @@@ again ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, auto_ack); rc = qdio_check_ccq(q, ccq); - - /* At least one buffer was processed, return and extract the remaining - * buffers later. - */ - if ((ccq == 96) && (count != tmp_count)) { - qperf_inc(q, eqbs_partial); - return (count - tmp_count); - } + if (!rc) + return count - tmp_count;
if (rc == 1) { DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); goto again; }
- if (rc < 0) { - DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, - QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); - return 0; + if (rc == 2) { + BUG_ON(tmp_count == count); + qperf_inc(q, eqbs_partial); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + tmp_count); + /* + * Retry once, if that fails bail out and process the + * extracted buffers before trying again. + */ + if (!retried++) + goto again; + else + return count - tmp_count; } - return count - tmp_count; + + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + q->nr, q->first_to_kick, count, + q->irq_ptr->int_parm); + return 0; }
/** @@@ -199,28 -196,27 +203,28 @@@ static int qdio_do_sqbs(struct qdio_q * again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); rc = qdio_check_ccq(q, ccq); - if (rc == 1) { + if (!rc) { + WARN_ON(tmp_count); + return count - tmp_count; + } + + if (rc == 1 || rc == 2) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); qperf_inc(q, sqbs_partial); goto again; } - if (rc < 0) { - DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, - QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); - return 0; - } - WARN_ON(tmp_count); - return count - tmp_count; + + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; }
/* returns number of examined buffers and their common state in *state */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, - int auto_ack) + int auto_ack, int merge_pending) { unsigned char __state = 0; int i; @@@ -232,9 -228,14 +236,14 @@@ return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) { - if (!__state) + if (!__state) { __state = q->slsb.val[bufnr]; - else if (q->slsb.val[bufnr] != __state) + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + } else if (merge_pending) { + if ((q->slsb.val[bufnr] & __state) != __state) + break; + } else if (q->slsb.val[bufnr] != __state) break; bufnr = next_buf(bufnr); } @@@ -245,7 -246,7 +254,7 @@@ static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack) { - return get_buf_states(q, bufnr, state, 1, auto_ack); + return get_buf_states(q, bufnr, state, 1, auto_ack, 0); }
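A simplified, stand-alone sketch of the merge_pending idea added to get_buf_states() above: while scanning, buffers in the new PENDING state are folded into EMPTY so both count as completed work. The array, constants and function name below are stand-ins, and the real code uses a bitwise containment test rather than the plain equality shown here:

#include <stddef.h>

#define ST_OUTPUT_EMPTY   0xa1	/* mirrors SLSB_P_OUTPUT_EMPTY above */
#define ST_OUTPUT_PENDING 0xa3	/* mirrors SLSB_P_OUTPUT_PENDING above */

/* Count leading buffers that share one (merged) state. */
static size_t scan_done(const unsigned char *slsb, size_t n, int merge_pending)
{
	unsigned char first = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		unsigned char s = slsb[i];

		if (merge_pending && s == ST_OUTPUT_PENDING)
			s = ST_OUTPUT_EMPTY;	/* treat PENDING as done */
		if (i == 0)
			first = s;
		else if (s != first)
			break;			/* common state ends here */
	}
	return i;
}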
/* wrap-around safe setting of slsb states, returns number of changed buffers */ @@@ -274,7 -275,7 +283,7 @@@ static inline int set_buf_state(struct }
/* set slsb states to initial state */ -void qdio_init_buf_states(struct qdio_irq *irq_ptr) +static void qdio_init_buf_states(struct qdio_irq *irq_ptr) { struct qdio_q *q; int i; @@@ -316,19 -317,28 +325,28 @@@ static inline int qdio_siga_sync_q(stru return qdio_siga_sync(q, q->mask, 0); }
- static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) + static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, + unsigned long aob) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; int retries = 0, cc; + unsigned long laob = 0; + + if (q->u.out.use_cq && aob != 0) { + fc = QDIO_SIGA_WRITEQ; + laob = aob; + }
if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; fc |= QDIO_SIGA_QEBSM_FLAG; } again: - cc = do_siga_output(schid, q->mask, busy_bit, fc); + WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) || + (aob && fc != QDIO_SIGA_WRITEQ)); + cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
/* hipersocket busy condition */ if (unlikely(*busy_bit)) { @@@ -387,7 -397,7 +405,7 @@@ int debug_get_buf_state(struct qdio_q * { if (need_siga_sync(q)) qdio_siga_sync_q(q); - return get_buf_states(q, bufnr, state, 1, 0); + return get_buf_states(q, bufnr, state, 1, 0, 0); }
static inline void qdio_stop_polling(struct qdio_q *q) @@@ -434,7 -444,7 +452,7 @@@ static void process_buffer_error(struc qperf_inc(q, target_full); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", q->first_to_check); - return; + goto set; }
DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); @@@ -444,7 -454,6 +462,7 @@@ q->sbal[q->first_to_check]->element[14].sflags, q->sbal[q->first_to_check]->element[15].sflags);
+set: /* * Interrupts may be avoided as long as the error is present * so change the buffer state immediately to avoid starvation. @@@ -502,8 -511,6 +520,8 @@@ static int get_inbound_buffer_frontier( int count, stop; unsigned char state = 0;
+ q->timestamp = get_clock_fast(); + /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved * would return 0. @@@ -518,7 -525,7 +536,7 @@@ * No siga sync here, as a PCI or we after a thin interrupt * already sync'ed the queues. */ - count = get_buf_states(q, q->first_to_check, &state, count, 1); + count = get_buf_states(q, q->first_to_check, &state, count, 1, 0); if (!count) goto out;
@@@ -601,6 -608,107 +619,107 @@@ static inline int qdio_inbound_q_done(s return 0; }
+ static inline int contains_aobs(struct qdio_q *q) + { + return !q->is_input_q && q->u.out.use_cq; + } + + static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q, + int i, struct qaob *aob) + { + int tmp; + + DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i, + (unsigned long) virt_to_phys(aob)); + DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx", + (unsigned long) aob->res0[0]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx", + (unsigned long) aob->res0[1]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx", + (unsigned long) aob->res0[2]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx", + (unsigned long) aob->res0[3]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx", + (unsigned long) aob->res0[4]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx", + (unsigned long) aob->res0[5]); + DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1); + DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2); + DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3); + DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc); + DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags); + DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs); + DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count); + for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) { + DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp, + (unsigned long) aob->sba[tmp]); + DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp, + (unsigned long) q->sbal[i]->element[tmp].addr); + DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]); + DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp, + q->sbal[i]->element[tmp].length); + } + DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0); + for (tmp = 0; tmp < 2; ++tmp) { + DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp, + (unsigned long) aob->res4[tmp]); + } + DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1); + DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2); + } + + static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) + { + unsigned char state = 0; + int j, b = start; + + if (!contains_aobs(q)) + return; + + for (j = 0; j < count; ++j) { + get_buf_state(q, b, &state, 0); + if (state == SLSB_P_OUTPUT_PENDING) { + struct qaob *aob = q->u.out.aobs[b]; + if (aob == NULL) + continue; + + BUG_ON(q->u.out.sbal_state == NULL); + q->u.out.sbal_state[b].flags |= + QDIO_OUTBUF_STATE_FLAG_PENDING; + q->u.out.aobs[b] = NULL; + } else if (state == SLSB_P_OUTPUT_EMPTY) { + BUG_ON(q->u.out.sbal_state == NULL); + q->u.out.sbal_state[b].aob = NULL; + } + b = next_buf(b); + } + } + + static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + int bufnr) + { + unsigned long phys_aob = 0; + + if (!q->use_cq) + goto out; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { + BUG_ON(q->sbal_state == NULL); + q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + BUG_ON(phys_aob & 0xFF); + } + + out: + return phys_aob; + } + static void qdio_kick_handler(struct qdio_q *q) { int start = q->first_to_kick; @@@ -621,6 -729,8 +740,8 @@@ start, count); }
+ qdio_handle_aobs(q, start, count); + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, q->irq_ptr->int_parm);
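qdio_aob_for_buffer() above allocates the asynchronous output block for a buffer only on first use and hands its physical address to the hardware; when allocation fails it returns 0 and the SIGA-W is issued without an AOB. A minimal sketch of that lazy-allocation pattern; my_outq, my_block_phys(), MY_NR_BUFS and the 256-byte block size are hypothetical stand-ins, not qdio API:

#include <linux/io.h>
#include <linux/slab.h>

#define MY_NR_BUFS 128	/* assumption: one slot per outbound buffer */

struct my_outq {
	void *blocks[MY_NR_BUFS];	/* optional block per buffer */
};

static unsigned long my_block_phys(struct my_outq *q, int bufnr)
{
	if (!q->blocks[bufnr])		/* allocate on first use only */
		q->blocks[bufnr] = kzalloc(256, GFP_ATOMIC);
	if (!q->blocks[bufnr])
		return 0;		/* caller falls back to the plain path */
	/* hand the physical address to the hardware */
	return (unsigned long)virt_to_phys(q->blocks[bufnr]);
}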
@@@ -670,8 -780,6 +791,8 @@@ static int get_outbound_buffer_frontier int count, stop; unsigned char state = 0;
+ q->timestamp = get_clock_fast(); + if (need_siga_sync(q)) if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || @@@ -685,23 -793,26 +806,26 @@@ */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); - if (q->first_to_check == stop) - return q->first_to_check; + goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 0); + count = get_buf_states(q, q->first_to_check, &state, count, 0, 1); if (!count) - return q->first_to_check; + goto out;
switch (state) { + case SLSB_P_OUTPUT_PENDING: + BUG(); case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, + "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); + break; case SLSB_P_OUTPUT_ERROR: process_buffer_error(q, count); @@@ -714,7 -825,8 +838,8 @@@ /* the adapter has not fetched the output yet */ if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", + q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: @@@ -722,6 -834,8 +847,8 @@@ default: BUG(); } + + out: return q->first_to_check; }
@@@ -745,7 -859,7 +872,7 @@@ static inline int qdio_outbound_q_moved return 0; }
- static int qdio_kick_outbound_q(struct qdio_q *q) + static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) { int retries = 0, cc; unsigned int busy_bit; @@@ -757,7 -871,7 +884,7 @@@ retry: qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit); + cc = qdio_siga_output(q, &busy_bit, aob); switch (cc) { case 0: break; @@@ -796,13 -910,21 +923,13 @@@ static void __qdio_outbound_processing( if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) goto sched;
- /* bail out for HiperSockets unicast queues */ - if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) - return; - - if ((queue_type(q) == QDIO_IQDIO_QFMT) && - (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) - goto sched; - if (q->u.out.pci_out_enabled) return;
/* * Now we know that queue type is either qeth without pci enabled - * or HiperSockets multicast. Make sure buffer switch from PRIMED to - * EMPTY is noticed and outbound_handler is called after some time. + * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY + * is noticed and outbound_handler is called after some time. */ if (qdio_outbound_q_done(q)) del_timer(&q->u.out.timer); @@@ -926,8 -1048,9 +1053,9 @@@ static void qdio_int_handler_pci(struc } q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, q->irq_ptr->int_parm); - } else + } else { tasklet_schedule(&q->tasklet); + } }
if (!pci_out_supported(q)) @@@ -947,7 -1070,6 +1075,7 @@@ static void qdio_handle_activate_check( { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; + int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); @@@ -961,10 -1083,8 +1089,10 @@@ dump_stack(); goto no_handler; } + + count = sub_buf(q->first_to_check, q->first_to_kick); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, irq_ptr->int_parm); + q->nr, q->first_to_kick, count, irq_ptr->int_parm); no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); } @@@ -1003,6 -1123,7 +1131,6 @@@ void qdio_int_handler(struct ccw_devic return; }
- kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++; if (irq_ptr->perf_stat_enabled) irq_ptr->perf_stat.qdio_int++;
@@@ -1243,6 -1364,26 +1371,26 @@@ out_err } EXPORT_SYMBOL_GPL(qdio_allocate);
+ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) + { + struct qdio_q *q = irq_ptr->input_qs[0]; + int i, use_cq = 0; + + if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) + use_cq = 1; + + for_each_output_queue(irq_ptr, q, i) { + if (use_cq) { + if (qdio_enable_async_operation(&q->u.out) < 0) { + use_cq = 0; + continue; + } + } else + qdio_disable_async_operation(&q->u.out); + } + DBF_EVENT("use_cq:%d", use_cq); + } + /** * qdio_establish - establish queues on a qdio subchannel * @init_data: initialization data @@@ -1308,6 -1449,8 +1456,8 @@@ int qdio_establish(struct qdio_initiali qdio_setup_ssqd_info(irq_ptr); DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
+ qdio_detect_hsicq(irq_ptr); + /* qebsm is now setup if available, initialize buffer states */ qdio_init_buf_states(irq_ptr);
@@@ -1449,12 -1592,9 +1599,9 @@@ set used = atomic_add_return(count, &q->nr_buf_used) - count; BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
- /* no need to signal as long as the adapter had free buffers */ - if (used) - return 0; - if (need_siga_in(q)) return qdio_siga_input(q); + return 0; }
@@@ -1487,17 -1627,21 +1634,21 @@@ static int handle_outbound(struct qdio_ q->u.out.pci_out_enabled = 0;
if (queue_type(q) == QDIO_IQDIO_QFMT) { - /* One SIGA-W per buffer required for unicast HiperSockets. */ + unsigned long phys_aob = 0; + + /* One SIGA-W per buffer required for unicast HSI */ WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
- rc = qdio_kick_outbound_q(q); + phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); + + rc = qdio_kick_outbound_q(q, phys_aob); } else if (need_siga_sync(q)) { rc = qdio_siga_sync_q(q); } else { /* try to fast requeue buffers */ get_buf_state(q, prev_buf(bufnr), &state, 0); if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q); + rc = qdio_kick_outbound_q(q, 0); else qperf_inc(q, fast_requeue); } @@@ -1525,6 -1669,7 +1676,7 @@@ int do_QDIO(struct ccw_device *cdev, un { struct qdio_irq *irq_ptr;
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL;
@@@ -1569,7 -1714,7 +1721,7 @@@ int qdio_start_irq(struct ccw_device *c
WARN_ON(queue_irqs_enabled(q));
- if (!shared_ind(q->irq_ptr->dsci)) + if (!shared_ind(q)) xchg(q->irq_ptr->dsci, 0);
qdio_stop_polling(q); @@@ -1579,7 -1724,7 +1731,7 @@@ * We need to check again to not lose initiative after * resetting the ACK state. */ - if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci) + if (!shared_ind(q) && *q->irq_ptr->dsci) goto rescan; if (!qdio_inbound_q_done(q)) goto rescan; diff --combined drivers/s390/cio/qdio_setup.c index a82b2d3,dd8bd67..d9a46a4 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@@ -19,6 -19,22 +19,22 @@@ #include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache; + static struct kmem_cache *qdio_aob_cache; + + struct qaob *qdio_allocate_aob() + { + struct qaob *aob; + + aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC); + return aob; + } + EXPORT_SYMBOL_GPL(qdio_allocate_aob); + + void qdio_release_aob(struct qaob *aob) + { + kmem_cache_free(qdio_aob_cache, aob); + } + EXPORT_SYMBOL_GPL(qdio_release_aob);
/* * qebsm is only available under 64bit but the adapter sets the feature @@@ -154,29 -170,36 +170,36 @@@ static void setup_queues(struct qdio_ir struct qdio_q *q; void **input_sbal_array = qdio_init->input_sbal_addr_array; void **output_sbal_array = qdio_init->output_sbal_addr_array; + struct qdio_outbuf_state *output_sbal_state_array = + qdio_init->output_sbal_state_array; int i;
for_each_input_queue(irq_ptr, q, i) { - DBF_EVENT("in-q:%1d", i); + DBF_EVENT("inq:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1; - q->u.in.queue_start_poll = qdio_init->queue_start_poll; + q->u.in.queue_start_poll = qdio_init->queue_start_poll[i]; + setup_storage_lists(q, irq_ptr, input_sbal_array, i); input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
- if (is_thinint_irq(irq_ptr)) + if (is_thinint_irq(irq_ptr)) { tasklet_init(&q->tasklet, tiqdio_inbound_processing, (unsigned long) q); - else + } else { tasklet_init(&q->tasklet, qdio_inbound_processing, (unsigned long) q); + } }
for_each_output_queue(irq_ptr, q, i) { DBF_EVENT("outq:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+ q->u.out.sbal_state = output_sbal_state_array; + output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; + q->is_input_q = 0; q->u.out.scan_threshold = qdio_init->scan_threshold; setup_storage_lists(q, irq_ptr, output_sbal_array, i); @@@ -311,6 -334,19 +334,19 @@@ void qdio_release_memory(struct qdio_ir for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { q = irq_ptr->output_qs[i]; if (q) { + if (q->u.out.use_cq) { + int n; + + for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) { + struct qaob *aob = q->u.out.aobs[n]; + if (aob) { + qdio_release_aob(aob); + q->u.out.aobs[n] = NULL; + } + } + + qdio_disable_async_operation(&q->u.out); + } free_page((unsigned long) q->slib); kmem_cache_free(qdio_q_cache, q); } @@@ -345,7 -381,6 +381,7 @@@ static void setup_qdr(struct qdio_irq * int i;
irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->ac = qdio_init->qdr_ac; irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ @@@ -466,23 -501,60 +502,60 @@@ void qdio_print_subchannel_info(struct printk(KERN_INFO "%s", s); }
+ int qdio_enable_async_operation(struct qdio_output_q *outq) + { + outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q, + GFP_ATOMIC); + if (!outq->aobs) { + outq->use_cq = 0; + return -ENOMEM; + } + outq->use_cq = 1; + return 0; + } + + void qdio_disable_async_operation(struct qdio_output_q *q) + { + kfree(q->aobs); + q->aobs = NULL; + q->use_cq = 0; + } + int __init qdio_setup_init(void) { + int rc; + qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), 256, 0, NULL); if (!qdio_q_cache) return -ENOMEM;
+ qdio_aob_cache = kmem_cache_create("qdio_aob", + sizeof(struct qaob), + sizeof(struct qaob), + 0, + NULL); + if (!qdio_aob_cache) { + rc = -ENOMEM; + goto free_qdio_q_cache; + } + /* Check for OSA/FCP thin interrupts (bit 67). */ DBF_EVENT("thinint:%1d", (css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */ DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0); - return 0; + rc = 0; + out: + return rc; + free_qdio_q_cache: + kmem_cache_destroy(qdio_q_cache); + goto out; }
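qdio_setup_init() above creates the new qdio_aob cache with the object size doubling as the alignment, presumably so that each struct qaob starts on the boundary the BUG_ON(phys_aob & 0xFF) check expects. A minimal sketch of that slab pattern, with a hypothetical struct my_obj instead of struct qaob:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_obj {			/* hypothetical, stands in for struct qaob */
	u64 payload[32];
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/* object size reused as alignment, mirroring the qdio_aob cache */
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
				     sizeof(struct my_obj), 0, NULL);
	return my_cache ? 0 : -ENOMEM;
}

static struct my_obj *my_obj_alloc(void)
{
	/* GFP_ATOMIC because callers may run in softirq context */
	return kmem_cache_zalloc(my_cache, GFP_ATOMIC);
}

static void my_obj_free(struct my_obj *obj)
{
	kmem_cache_free(my_cache, obj);
}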
void qdio_setup_exit(void) { + kmem_cache_destroy(qdio_aob_cache); kmem_cache_destroy(qdio_q_cache); } diff --combined drivers/s390/cio/qdio_thinint.c index 10750c7,a3e3949..9d1e7ef --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@@ -29,14 -29,14 +29,14 @@@
/* list of thin interrupt input queues */ static LIST_HEAD(tiq_list); -DEFINE_MUTEX(tiq_list_lock); +static DEFINE_MUTEX(tiq_list_lock);
/* adapter local summary indicator */ static u8 *tiqdio_alsi;
struct indicator_t *q_indicators;
-static u64 last_ai_time; +u64 last_ai_time;
/* returns addr for the device state change indicator */ static u32 *get_indicator(void) @@@ -67,12 -67,9 +67,9 @@@ static void put_indicator(u32 *addr
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) { - struct qdio_q *q; - int i; - mutex_lock(&tiq_list_lock); - for_each_input_queue(irq_ptr, q, i) - list_add_rcu(&q->entry, &tiq_list); + BUG_ON(irq_ptr->nr_input_qs < 1); + list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); mutex_unlock(&tiq_list_lock); xchg(irq_ptr->dsci, 1 << 7); } @@@ -80,19 -77,17 +77,17 @@@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) { struct qdio_q *q; - int i;
- for (i = 0; i < irq_ptr->nr_input_qs; i++) { - q = irq_ptr->input_qs[i]; - /* if establish triggered an error */ - if (!q || !q->entry.prev || !q->entry.next) - continue; + BUG_ON(irq_ptr->nr_input_qs < 1); + q = irq_ptr->input_qs[0]; + /* if establish triggered an error */ + if (!q || !q->entry.prev || !q->entry.next) + return;
- mutex_lock(&tiq_list_lock); - list_del_rcu(&q->entry); - mutex_unlock(&tiq_list_lock); - synchronize_rcu(); - } + mutex_lock(&tiq_list_lock); + list_del_rcu(&q->entry); + mutex_unlock(&tiq_list_lock); + synchronize_rcu(); }
static inline u32 clear_shared_ind(void) @@@ -102,6 -97,40 +97,40 @@@ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); }
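clear_shared_ind() above uses xchg() to fetch and reset the shared device state change indicator in a single atomic step, so a bit set by the adapter between the test and the clear is not lost. A tiny sketch of that read-and-clear pattern with a hypothetical indicator word:

#include <linux/atomic.h>
#include <linux/types.h>

static u32 my_indicator;

static u32 my_indicator_take(void)
{
	if (!my_indicator)		/* fast path: nothing pending */
		return 0;
	/* atomically return the pending bits and zero the word */
	return xchg(&my_indicator, 0);
}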
+ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) + { + struct qdio_q *q; + int i; + + for_each_input_queue(irq, q, i) { + if (!references_shared_dsci(irq) && + has_multiple_inq_on_dsci(irq)) + xchg(q->irq_ptr->dsci, 0); + + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) { + qperf_inc(q, int_discarded); + continue; + } + + /* avoid dsci clear here, done after processing */ + q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, + q->irq_ptr->int_parm); + } else { + if (!shared_ind(q)) + xchg(q->irq_ptr->dsci, 0); + + /* + * Call inbound processing but not directly + * since that could starve other thinint queues. + */ + tasklet_schedule(&q->tasklet); + } + } + } + /** * tiqdio_thinint_handler - thin interrupt handler for qdio * @alsi: pointer to adapter local summary indicator @@@ -120,35 -149,18 +149,18 @@@ static void tiqdio_thinint_handler(voi
/* check for work on all inbound thinint queues */ list_for_each_entry_rcu(q, &tiq_list, entry) { + struct qdio_irq *irq;
/* only process queues from changed sets */ - if (unlikely(shared_ind(q->irq_ptr->dsci))) { + irq = q->irq_ptr; + if (unlikely(references_shared_dsci(irq))) { if (!si_used) continue; - } else if (!*q->irq_ptr->dsci) + } else if (!*irq->dsci) continue;
- if (q->u.in.queue_start_poll) { - /* skip if polling is enabled or already in work */ - if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, - &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); - continue; - } + tiqdio_call_inq_handlers(irq);
- /* avoid dsci clear here, done after processing */ - q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, - q->irq_ptr->int_parm); - } else { - /* only clear it if the indicator is non-shared */ - if (!shared_ind(q->irq_ptr->dsci)) - xchg(q->irq_ptr->dsci, 0); - /* - * Call inbound processing but not directly - * since that could starve other thinint queues. - */ - tasklet_schedule(&q->tasklet); - } qperf_inc(q, adapter_int); } rcu_read_unlock(); diff --combined drivers/s390/net/lcs.c index 9367de0,fb246b9..c28713d --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@@ -26,6 -26,7 +26,6 @@@ #define KMSG_COMPONENT "lcs" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/if.h> #include <linux/netdevice.h> @@@ -1398,6 -1399,7 +1398,6 @@@ lcs_irq(struct ccw_device *cdev, unsign int rc, index; int cstat, dstat;
- kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++; if (lcs_check_irb_error(cdev, irb)) return;
@@@ -1970,7 -1972,7 +1970,7 @@@ lcs_portno_store (struct device *dev, s
static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
-const char *lcs_type[] = { +static const char *lcs_type[] = { "not a channel", "2216 parallel", "2216 channel", @@@ -2120,7 -2122,7 +2120,7 @@@ static const struct net_device_ops lcs_ .ndo_stop = lcs_stop_device, .ndo_get_stats = lcs_getstats, .ndo_start_xmit = lcs_start_xmit, - .ndo_set_multicast_list = lcs_set_multicast_list, + .ndo_set_rx_mode = lcs_set_multicast_list, };
static int @@@ -2397,7 -2399,6 +2397,7 @@@ static struct ccw_driver lcs_ccw_drive .ids = lcs_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, + .int_class = IOINT_LCS, };
/** diff --combined drivers/s390/net/qeth_l3_main.c index 8a4e91d,ce73520..e4c1176 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@@ -29,6 -29,7 +29,7 @@@ #include <net/ip.h> #include <net/arp.h> #include <net/ip6_checksum.h> + #include <net/iucv/af_iucv.h>
#include "qeth_l3.h"
@@@ -267,7 -268,7 +268,7 @@@ static int __qeth_l3_insert_ip_todo(str } }
- static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) + int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) { unsigned long flags; int rc = 0; @@@ -286,7 -287,7 +287,7 @@@ return rc; }
- static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) + int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) { unsigned long flags; int rc = 0; @@@ -305,7 -306,7 +306,7 @@@ }
- static struct qeth_ipaddr *qeth_l3_get_addr_buffer( + struct qeth_ipaddr *qeth_l3_get_addr_buffer( enum qeth_prot_versions prot) { struct qeth_ipaddr *addr; @@@ -421,7 -422,7 +422,7 @@@ again list_splice(&fail_list, &card->ip_list); }
- static void qeth_l3_set_ip_addr_list(struct qeth_card *card) + void qeth_l3_set_ip_addr_list(struct qeth_card *card) { struct list_head *tbd_list; struct qeth_ipaddr *todo, *addr; @@@ -438,7 -439,7 +439,7 @@@
spin_lock_irqsave(&card->ip_lock, flags); tbd_list = card->ip_tbd_list; - card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); + card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC); if (!card->ip_tbd_list) { QETH_CARD_TEXT(card, 0, "silnomem"); card->ip_tbd_list = tbd_list; @@@ -1414,7 -1415,7 +1415,7 @@@ static int qeth_l3_send_checksum_comman return 0; }
-int qeth_l3_set_rx_csum(struct qeth_card *card, int on) +static int qeth_l3_set_rx_csum(struct qeth_card *card, int on) { int rc = 0;
@@@ -1993,12 -1994,13 +1994,13 @@@ static int qeth_l3_process_inbound_buff __u16 vlan_tag = 0; int is_vlan; unsigned int len; + __u16 magic;
*done = 0; BUG_ON(!budget); while (budget) { skb = qeth_core_get_next_skb(card, - card->qdio.in_q->bufs[card->rx.b_index].buffer, + &card->qdio.in_q->bufs[card->rx.b_index], &card->rx.b_element, &card->rx.e_offset, &hdr); if (!skb) { *done = 1; @@@ -2007,12 -2009,26 +2009,26 @@@ skb->dev = card->dev; switch (hdr->hdr.l3.id) { case QETH_HEADER_TYPE_LAYER3: - is_vlan = qeth_l3_rebuild_skb(card, skb, hdr, + magic = *(__u16 *)skb->data; + if ((card->info.type == QETH_CARD_TYPE_IQD) && + (magic == ETH_P_AF_IUCV)) { + skb->protocol = ETH_P_AF_IUCV; + skb->pkt_type = PACKET_HOST; + skb->mac_header = NET_SKB_PAD; + skb->dev = card->dev; + len = skb->len; + card->dev->header_ops->create(skb, card->dev, 0, + card->dev->dev_addr, "FAKELL", + card->dev->addr_len); + netif_receive_skb(skb); + } else { + is_vlan = qeth_l3_rebuild_skb(card, skb, hdr, &vlan_tag); - len = skb->len; - if (is_vlan && !card->options.sniffer) - __vlan_hwaccel_put_tag(skb, vlan_tag); - napi_gro_receive(&card->napi, skb); + len = skb->len; + if (is_vlan && !card->options.sniffer) + __vlan_hwaccel_put_tag(skb, vlan_tag); + napi_gro_receive(&card->napi, skb); + } break; case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ skb->pkt_type = PACKET_HOST; @@@ -2784,6 -2800,30 +2800,30 @@@ int inline qeth_l3_get_cast_type(struc return cast_type; }
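The next hunk adds qeth_l3_fill_af_iucv_hdr(), which builds a 16-byte, link-local style destination (fe80:: prefix plus the 8-byte IUCV user id) for AF_IUCV traffic on HiperSockets devices. A minimal sketch of just that address composition, with a hypothetical helper name:

#include <linux/string.h>
#include <linux/types.h>

static void make_iucv_daddr(u8 daddr[16], const u8 user_id[8])
{
	memset(daddr, 0, 16);
	daddr[0] = 0xfe;		/* link-local prefix fe80:: */
	daddr[1] = 0x80;
	memcpy(&daddr[8], user_id, 8);	/* low 8 bytes carry the user id */
}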
+ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, + struct qeth_hdr *hdr, struct sk_buff *skb) + { + char daddr[16]; + struct af_iucv_trans_hdr *iucv_hdr; + + skb_pull(skb, 14); + card->dev->header_ops->create(skb, card->dev, 0, + card->dev->dev_addr, card->dev->dev_addr, + card->dev->addr_len); + skb_pull(skb, 14); + iucv_hdr = (struct af_iucv_trans_hdr *)skb->data; + memset(hdr, 0, sizeof(struct qeth_hdr)); + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + hdr->hdr.l3.ext_flags = 0; + hdr->hdr.l3.length = skb->len; + hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + memset(daddr, 0, sizeof(daddr)); + daddr[0] = 0xfe; + daddr[1] = 0x80; + memcpy(&daddr[8], iucv_hdr->destUserID, 8); + memcpy(hdr->hdr.l3.dest_addr, daddr, 16); + } + static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int ipv, int cast_type) { @@@ -2936,8 -2976,11 +2976,11 @@@ static int qeth_l3_hard_start_xmit(stru int data_offset = -1; int nr_frags;
- if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) || - card->options.sniffer) + if (((card->info.type == QETH_CARD_TYPE_IQD) && + (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || + ((card->options.cq == QETH_CQ_ENABLED) && + (skb->protocol != ETH_P_AF_IUCV)))) || + card->options.sniffer) goto tx_drop;
if ((card->state != CARD_STATE_UP) || !card->lan_online) { @@@ -2959,7 -3002,10 +3002,10 @@@ if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && (skb_shinfo(skb)->nr_frags == 0)) { new_skb = skb; - data_offset = ETH_HLEN; + if (new_skb->protocol == ETH_P_AF_IUCV) + data_offset = 0; + else + data_offset = ETH_HLEN; hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@@ -2993,7 -3039,6 +3039,6 @@@ tag = (u16 *)(new_skb->data + 12); *tag = __constant_htons(ETH_P_8021Q); *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); - new_skb->vlan_tci = 0; } }
@@@ -3025,9 -3070,13 +3070,13 @@@ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); } else { - qeth_l3_fill_header(card, hdr, new_skb, ipv, - cast_type); - hdr->hdr.l3.length = new_skb->len - data_offset; + if (new_skb->protocol == ETH_P_AF_IUCV) + qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); + else { + qeth_l3_fill_header(card, hdr, new_skb, ipv, + cast_type); + hdr->hdr.l3.length = new_skb->len - data_offset; + } }
if (skb->ip_summed == CHECKSUM_PARTIAL) @@@ -3226,7 -3275,7 +3275,7 @@@ static const struct net_device_ops qeth .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qeth_l3_set_multicast_list, + .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_l3_do_ioctl, .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_l3_fix_features, @@@ -3242,7 -3291,7 +3291,7 @@@ static const struct net_device_ops qeth .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qeth_l3_set_multicast_list, + .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_l3_do_ioctl, .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_l3_fix_features, @@@ -3290,6 -3339,8 +3339,8 @@@ static int qeth_l3_setup_netdev(struct card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; qeth_l3_iqd_read_initial_mac(card); + if (card->options.hsuid[0]) + memcpy(card->dev->perm_addr, card->options.hsuid, 9); } else return -ENODEV;
@@@ -3660,7 -3711,6 +3711,6 @@@ static int qeth_l3_ip6_event(struct not struct qeth_ipaddr *addr; struct qeth_card *card;
- card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; diff --combined drivers/scsi/bnx2fc/bnx2fc.h index d882a2d1,dd335a2..63de1c7 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@@ -58,11 -58,11 +58,11 @@@
#include "57xx_hsi_bnx2fc.h" #include "bnx2fc_debug.h" - #include "../../net/cnic_if.h" + #include "../../net/ethernet/broadcom/cnic_if.h" #include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.4" +#define BNX2FC_VERSION "1.0.8"
#define PFX "bnx2fc: "
@@@ -81,7 -81,7 +81,7 @@@ #define BNX2FC_RQ_WQES_MAX 16 #define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
- #define BNX2FC_NUM_MAX_SESS 128 + #define BNX2FC_NUM_MAX_SESS 1024 #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 @@@ -224,7 -224,6 +224,7 @@@ struct bnx2fc_interface struct fcoe_ctlr ctlr; u8 vlan_enabled; int vlan_id; + bool enabled; };
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) diff --combined drivers/scsi/bnx2fc/bnx2fc_fcoe.c index ba7ecb1,820a184..85bcc4b --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@@ -22,7 -22,7 +22,7 @@@ DEFINE_PER_CPU(struct bnx2fc_percpu_s,
#define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Jun 23, 2011" +#define DRV_MODULE_RELDATE "Oct 02, 2011"
static char version[] __devinitdata = @@@ -56,7 -56,6 +56,7 @@@ static struct scsi_host_template bnx2fc static struct fc_function_template bnx2fc_transport_function; static struct fc_function_template bnx2fc_vport_xport_function; static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); static int bnx2fc_disable(struct net_device *netdev); @@@ -65,6 -64,7 +65,6 @@@ static void bnx2fc_recv_frame(struct sk
static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); -static int bnx2fc_net_config(struct fc_lport *lp); static int bnx2fc_lport_config(struct fc_lport *lport); static int bnx2fc_em_config(struct fc_lport *lport); static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); @@@ -78,7 -78,6 +78,7 @@@ static void bnx2fc_destroy_work(struct static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *phys_dev); +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
static int bnx2fc_fw_init(struct bnx2fc_hba *hba); @@@ -99,25 -98,6 +99,25 @@@ static struct notifier_block bnx2fc_cpu .notifier_call = bnx2fc_cpu_callback, };
+static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) +{ + return ((struct bnx2fc_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; +} + +/** + * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block + * @lport: the local port + * @fc_lesb: the link error status block + */ +static void bnx2fc_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb) +{ + struct net_device *netdev = bnx2fc_netdev(lport); + + __fcoe_get_lesb(lport, fc_lesb, netdev); +} + static void bnx2fc_clean_rx_queue(struct fc_lport *lp) { struct fcoe_percpu_s *bg; @@@ -322,7 -302,7 +322,7 @@@ static int bnx2fc_xmit(struct fc_lport return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) + cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) + frag->page_offset; } else { cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); @@@ -565,14 -545,6 +565,14 @@@ static void bnx2fc_recv_frame(struct sk break; } } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + put_cpu(); + kfree_skb(skb); + return; + } + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) @@@ -701,7 -673,7 +701,7 @@@ static void bnx2fc_link_speed_update(st struct net_device *netdev = interface->netdev; struct ethtool_cmd ecmd;
- if (!dev_ethtool_get_settings(netdev, &ecmd)) { + if (!__ethtool_get_settings(netdev, &ecmd)) { lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); if (ecmd.supported & (SUPPORTED_1000baseT_Half | @@@ -755,7 -727,7 +755,7 @@@ void bnx2fc_get_link_state(struct bnx2f clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); }
-static int bnx2fc_net_config(struct fc_lport *lport) +static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface; @@@ -781,16 -753,11 +781,16 @@@ bnx2fc_link_speed_update(lport);
if (!lport->vport) { - wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0); + if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) + wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, + 1, 0); BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0); + if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) + wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, + 2, 0); + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); fc_set_wwpn(lport, wwpn); } @@@ -802,8 -769,8 +802,8 @@@ static void bnx2fc_destroy_timer(unsign { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
- BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - " - "Destroy compl not received!!\n"); + printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); } @@@ -816,7 -783,7 +816,7 @@@ * @vlan_id: vlan id - associated vlan id with this event * * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and - * NETDEV_CHANGE_MTU events + * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. */ static void bnx2fc_indicate_netevent(void *context, unsigned long event, u16 vlan_id) @@@ -824,11 -791,12 +824,11 @@@ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; struct fc_lport *lport; struct fc_lport *vport; - struct bnx2fc_interface *interface; + struct bnx2fc_interface *interface, *tmp; int wait_for_upload = 0; u32 link_possible = 1;
- /* Ignore vlans for now */ - if (vlan_id != 0) + if (vlan_id != 0 && event != NETDEV_UNREGISTER) return;
switch (event) { @@@ -852,18 -820,6 +852,18 @@@ case NETDEV_CHANGE: break;
+ case NETDEV_UNREGISTER: + if (!vlan_id) + return; + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry_safe(interface, tmp, &if_list, list) { + if (interface->hba == hba && + interface->vlan_id == (vlan_id & VLAN_VID_MASK)) + __bnx2fc_destroy(interface); + } + mutex_unlock(&bnx2fc_dev_lock); + return; + default: printk(KERN_ERR PFX "Unknown netevent %ld", event); return; @@@ -882,15 -838,8 +882,15 @@@ bnx2fc_link_speed_update(lport);
if (link_possible && !bnx2fc_link_ok(lport)) { - printk(KERN_ERR "indicate_netevent: ctlr_link_up\n"); - fcoe_ctlr_link_up(&interface->ctlr); + /* Reset max recv frame size to default */ + fc_set_mfs(lport, BNX2FC_MFS); + /* + * ctlr link up will only be handled during + * enable to avoid sending discovery solicitation + * on a stale vlan + */ + if (interface->enabled) + fcoe_ctlr_link_up(&interface->ctlr); } else if (fcoe_ctlr_link_down(&interface->ctlr)) { mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) @@@ -1046,26 -995,17 +1046,28 @@@ static int bnx2fc_vport_create(struct f struct bnx2fc_interface *interface = port->priv; struct net_device *netdev = interface->netdev; struct fc_lport *vn_port; + int rc; + char buf[32]; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + printk(KERN_ERR PFX "Failed to create vport, " + "WWPN (0x%s) already exists\n", + buf); + return rc; + }
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "vn ports cannot be created on" "this interface\n"); return -EIO; } + rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); vn_port = bnx2fc_if_create(interface, &vport->dev, 1); mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock();
if (IS_ERR(vn_port)) { printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", @@@ -1084,46 -1024,16 +1086,46 @@@ return 0; }
+static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport, *tmp; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry_safe(blport, tmp, &hba->vports, list) { + if (blport->lport == lport) { + list_del(&blport->list); + kfree(blport); + } + } + spin_unlock_bh(&hba->hba_lock); +} + static int bnx2fc_vport_destroy(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; struct fcoe_port *port = lport_priv(vn_port); + struct bnx2fc_interface *interface = port->priv; + struct fc_lport *v_port; + bool found = false;
mutex_lock(&n_port->lp_mutex); + list_for_each_entry(v_port, &n_port->vports, list) + if (v_port->vport == vport) { + found = true; + break; + } + + if (!found) { + mutex_unlock(&n_port->lp_mutex); + return -ENOENT; + } list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); + bnx2fc_free_vport(interface->hba, port->lport); + bnx2fc_port_shutdown(port->lport); + bnx2fc_interface_put(interface); queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@@ -1144,7 -1054,7 +1146,7 @@@ static int bnx2fc_vport_disable(struct }
-static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface) +static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) { struct net_device *netdev = interface->netdev; struct net_device *physdev = interface->hba->phys_dev; @@@ -1342,7 -1252,7 +1344,7 @@@ struct bnx2fc_interface *bnx2fc_interfa interface->ctlr.get_src_addr = bnx2fc_get_src_mac; set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
- rc = bnx2fc_netdev_setup(interface); + rc = bnx2fc_interface_setup(interface); if (!rc) return interface;
@@@ -1408,7 -1318,7 +1410,7 @@@ static struct fc_lport *bnx2fc_if_creat fc_set_wwpn(lport, vport->port_name); } /* Configure netdev and networking properties of the lport */ - rc = bnx2fc_net_config(lport); + rc = bnx2fc_net_config(lport, interface->netdev); if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); goto lp_config_err; @@@ -1462,7 -1372,7 +1464,7 @@@ free_blport return NULL; }
-static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface) +static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) { /* Dont listen for Ethernet packets anymore */ __dev_remove_pack(&interface->fcoe_packet_type); @@@ -1470,11 -1380,10 +1472,11 @@@ synchronize_net(); }
-static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) +static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) { + struct fc_lport *lport = interface->ctlr.lp; struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_lport *blport, *tmp; + struct bnx2fc_hba *hba = interface->hba;
/* Stop the transmit retry timer */ del_timer_sync(&port->timer); @@@ -1482,14 -1391,6 +1484,14 @@@ /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport);
+ bnx2fc_net_cleanup(interface); + + bnx2fc_free_vport(hba, lport); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + /* Free queued packets for the receive thread */ bnx2fc_clean_rx_queue(lport);
@@@ -1506,22 -1407,19 +1508,22 @@@ /* Free memory used by statistical counters */ fc_lport_free_stats(lport);
- spin_lock_bh(&hba->hba_lock); - list_for_each_entry_safe(blport, tmp, &hba->vports, list) { - if (blport->lport == lport) { - list_del(&blport->list); - kfree(blport); - } - } - spin_unlock_bh(&hba->hba_lock); - /* Release Scsi_Host */ scsi_host_put(lport->host); }
+static void __bnx2fc_destroy(struct bnx2fc_interface *interface) +{ + struct fc_lport *lport = interface->ctlr.lp; + struct fcoe_port *port = lport_priv(lport); + + bnx2fc_interface_cleanup(interface); + bnx2fc_stop(interface); + list_del(&interface->list); + bnx2fc_interface_put(interface); + queue_work(bnx2fc_wq, &port->destroy_work); +} + /** * bnx2fc_destroy - Destroy a bnx2fc FCoE interface * @@@ -1535,6 -1433,8 +1537,6 @@@ static int bnx2fc_destroy(struct net_device *netdev) { struct bnx2fc_interface *interface = NULL; - struct bnx2fc_hba *hba; - struct fc_lport *lport; int rc = 0;
rtnl_lock(); @@@ -1547,9 -1447,15 +1549,9 @@@ goto netdev_err; }
- hba = interface->hba;
- bnx2fc_netdev_cleanup(interface); - lport = interface->ctlr.lp; - bnx2fc_stop(interface); - list_del(&interface->list); destroy_workqueue(interface->timer_work_queue); - bnx2fc_interface_put(interface); - bnx2fc_if_destroy(lport, hba); + __bnx2fc_destroy(interface);
netdev_err: mutex_unlock(&bnx2fc_dev_lock); @@@ -1561,13 -1467,22 +1563,13 @@@ static void bnx2fc_destroy_work(struct { struct fcoe_port *port; struct fc_lport *lport;
port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport;
BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
- bnx2fc_port_shutdown(lport); - rtnl_lock(); - mutex_lock(&bnx2fc_dev_lock); - bnx2fc_if_destroy(lport, hba); - mutex_unlock(&bnx2fc_dev_lock); - rtnl_unlock(); + bnx2fc_if_destroy(lport); }
static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) @@@ -1746,7 -1661,6 +1748,7 @@@ static void bnx2fc_fw_destroy(struct bn wait_event_interruptible(hba->destroy_wait, test_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags)); + clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); /* This should never happen */ if (signal_pending(current)) flush_signals(current); @@@ -1809,7 -1723,7 +1811,7 @@@ static void bnx2fc_start_disc(struct bn lport = interface->ctlr.lp; BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
- if (!bnx2fc_link_ok(lport)) { + if (!bnx2fc_link_ok(lport) && interface->enabled) { BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); fcoe_ctlr_link_up(&interface->ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; @@@ -1823,11 -1737,6 +1825,11 @@@ if (++wait_cnt > 12) break; } + + /* Reset max receive frame size to default */ + if (fc_set_mfs(lport, BNX2FC_MFS)) + return; + fc_lport_init(lport); fc_fabric_login(lport); } @@@ -1891,7 -1800,6 +1893,7 @@@ static int bnx2fc_disable(struct net_de rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); } else { + interface->enabled = false; fcoe_ctlr_link_down(&interface->ctlr); fcoe_clean_pending_queue(interface->ctlr.lp); } @@@ -1914,10 -1822,8 +1916,10 @@@ static int bnx2fc_enable(struct net_dev if (!interface || !interface->ctlr.lp) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); - } else if (!bnx2fc_link_ok(interface->ctlr.lp)) + } else if (!bnx2fc_link_ok(interface->ctlr.lp)) { fcoe_ctlr_link_up(&interface->ctlr); + interface->enabled = true; + }
mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); @@@ -2017,6 -1923,7 +2019,6 @@@ static int bnx2fc_create(struct net_dev if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); - bnx2fc_netdev_cleanup(interface); rc = -EINVAL; goto if_create_err; } @@@ -2029,15 -1936,8 +2031,15 @@@ /* Make this master N_port */ interface->ctlr.lp = lport;
+ if (!bnx2fc_link_ok(lport)) { + fcoe_ctlr_link_up(&interface->ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + BNX2FC_HBA_DBG(lport, "create: START DISC\n"); bnx2fc_start_disc(interface); + interface->enabled = true; /* * Release from kref_init in bnx2fc_interface_setup, on success * lport should be holding a reference taken in bnx2fc_if_create @@@ -2051,7 -1951,6 +2053,7 @@@ if_create_err: destroy_workqueue(interface->timer_work_queue); ifput_err: + bnx2fc_net_cleanup(interface); bnx2fc_interface_put(interface); netdev_err: module_put(THIS_MODULE); @@@ -2118,6 -2017,7 +2120,6 @@@ static void bnx2fc_ulp_exit(struct cnic { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface, *tmp; - struct fc_lport *lport;
BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
@@@ -2139,10 -2039,18 +2141,10 @@@ list_del_init(&hba->list); adapter_count--;
- list_for_each_entry_safe(interface, tmp, &if_list, list) { + list_for_each_entry_safe(interface, tmp, &if_list, list) /* destroy not called yet, move to quiesced list */ - if (interface->hba == hba) { - bnx2fc_netdev_cleanup(interface); - bnx2fc_stop(interface); - - list_del(&interface->list); - lport = interface->ctlr.lp; - bnx2fc_interface_put(interface); - bnx2fc_if_destroy(lport, hba); - } - } + if (interface->hba == hba) + __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock);
bnx2fc_ulp_stop(hba); @@@ -2211,7 -2119,7 +2213,7 @@@ static void bnx2fc_percpu_thread_create (void *)p, "bnx2fc_thread/%d", cpu); /* bind thread to the cpu */ - if (likely(!IS_ERR(p->iothread))) { + if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); @@@ -2223,6 -2131,7 +2225,6 @@@ static void bnx2fc_percpu_thread_destro struct bnx2fc_percpu_s *p; struct task_struct *thread; struct bnx2fc_work *work, *tmp; - LIST_HEAD(work_list);
BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
@@@ -2234,7 -2143,7 +2236,7 @@@
/* Free all work in the list */ - list_for_each_entry_safe(work, tmp, &work_list, list) { + list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe); kfree(work); @@@ -2467,7 -2376,6 +2469,7 @@@ static struct fc_function_template bnx2 .vport_create = bnx2fc_vport_create, .vport_delete = bnx2fc_vport_destroy, .vport_disable = bnx2fc_vport_disable, + .bsg_request = fc_lport_bsg_request, };
static struct fc_function_template bnx2fc_vport_xport_function = { @@@ -2501,7 -2409,6 +2503,7 @@@ .get_fc_host_stats = fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, + .bsg_request = fc_lport_bsg_request, };
/** @@@ -2531,7 -2438,6 +2533,7 @@@ static struct libfc_function_template b .elsct_send = bnx2fc_elsct_send, .fcp_abort_io = bnx2fc_abort_io, .fcp_cleanup = bnx2fc_cleanup, + .get_lesb = bnx2fc_get_lesb, .rport_event_callback = bnx2fc_rport_event_handler, };
diff --combined drivers/scsi/cxgbi/libcxgbi.c index 67ded44,1c1329b..c363a4b --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@@ -1787,7 -1787,7 +1787,7 @@@ static int sgl_seek_offset(struct scatt }
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, - unsigned int dlen, skb_frag_t *frags, + unsigned int dlen, struct page_frag *frags, int frag_max) { unsigned int datalen = dlen; @@@ -1814,7 -1814,7 +1814,7 @@@ copy = min(datalen, sglen); if (i && page == frags[i - 1].page && sgoffset + sg->offset == - frags[i - 1].page_offset + frags[i - 1].size) { + frags[i - 1].offset + frags[i - 1].size) { frags[i - 1].size += copy; } else { if (i >= frag_max) { @@@ -1824,7 -1824,7 +1824,7 @@@ }
frags[i].page = page; - frags[i].page_offset = sg->offset + sgoffset; + frags[i].offset = sg->offset + sgoffset; frags[i].size = copy; i++; } @@@ -1944,14 -1944,14 +1944,14 @@@ int cxgbi_conn_init_pdu(struct iscsi_ta if (tdata->nr_frags > MAX_SKB_FRAGS || (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { char *dst = skb->data + task->hdr_len; - skb_frag_t *frag = tdata->frags; + struct page_frag *frag = tdata->frags;
/* data fits in the skb's headroom */ for (i = 0; i < tdata->nr_frags; i++, frag++) { char *src = kmap_atomic(frag->page, KM_SOFTIRQ0);
- memcpy(dst, src+frag->page_offset, frag->size); + memcpy(dst, src+frag->offset, frag->size); dst += frag->size; kunmap_atomic(src, KM_SOFTIRQ0); } @@@ -1962,11 -1962,13 +1962,13 @@@ skb_put(skb, count + padlen); } else { /* data fit into frag_list */ - for (i = 0; i < tdata->nr_frags; i++) - get_page(tdata->frags[i].page); - - memcpy(skb_shinfo(skb)->frags, tdata->frags, - sizeof(skb_frag_t) * tdata->nr_frags); + for (i = 0; i < tdata->nr_frags; i++) { + __skb_fill_page_desc(skb, i, + tdata->frags[i].page, + tdata->frags[i].offset, + tdata->frags[i].size); + skb_frag_ref(skb, i); + } skb_shinfo(skb)->nr_frags = tdata->nr_frags; skb->len += count; skb->data_len += count; @@@ -2566,62 -2568,6 +2568,62 @@@ void cxgbi_iscsi_cleanup(struct iscsi_t } EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
+mode_t cxgbi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); + static int __init libcxgbi_init_module(void) { sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; diff --combined drivers/scsi/cxgbi/libcxgbi.h index 5d453a0,3a25b11..20c88279 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@@ -574,7 -574,7 +574,7 @@@ struct cxgbi_endpoint #define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) struct cxgbi_task_data { unsigned short nr_frags; - skb_frag_t frags[MAX_PDU_FRAGS]; + struct page_frag frags[MAX_PDU_FRAGS]; struct sk_buff *skb; unsigned int offset; unsigned int count; @@@ -709,7 -709,6 +709,7 @@@ int cxgbi_conn_xmit_pdu(struct iscsi_ta
void cxgbi_cleanup_task(struct iscsi_task *task);
+mode_t cxgbi_attr_is_visible(int param_type, int param); void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); int cxgbi_set_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *, int); diff --combined drivers/scsi/fcoe/fcoe.c index cda1b19,9d3d817..452dfe5 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@@ -52,7 -52,7 +52,7 @@@ MODULE_DESCRIPTION("FCoE") MODULE_LICENSE("GPL v2");
/* Performance tuning parameters for fcoe */ -static unsigned int fcoe_ddp_min; +static unsigned int fcoe_ddp_min = 4096; module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ "Direct Data Placement (DDP)."); @@@ -138,6 -138,7 +138,6 @@@ static int fcoe_vport_create(struct fc_ static int fcoe_vport_disable(struct fc_vport *, bool disable); static void fcoe_set_vport_symbolic_name(struct fc_vport *); static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); -static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = { .frame_send = fcoe_xmit, @@@ -280,7 -281,6 +280,7 @@@ static int fcoe_interface_setup(struct * use the first one for SPMA */ real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? vlan_dev_real_dev(netdev) : netdev; + fcoe->realdev = real_dev; rcu_read_lock(); for_each_dev_addr(real_dev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && @@@ -581,6 -581,23 +581,6 @@@ static int fcoe_lport_config(struct fc_ }
/** - * fcoe_get_wwn() - Get the world wide name from LLD if it supports it - * @netdev: the associated net device - * @wwn: the output WWN - * @type: the type of WWN (WWPN or WWNN) - * - * Returns: 0 for success - */ -static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) -{ - const struct net_device_ops *ops = netdev->netdev_ops; - - if (ops->ndo_fcoe_get_wwn) - return ops->ndo_fcoe_get_wwn(netdev, wwn, type); - return -EINVAL; -} - -/** * fcoe_netdev_features_change - Updates the lport's offload flags based * on the LLD netdev's FCoE feature flags */ @@@ -1118,9 -1135,8 +1118,9 @@@ static void fcoe_percpu_thread_create(u
p = &per_cpu(fcoe_percpu, cpu);
- thread = kthread_create(fcoe_percpu_receive_thread, - (void *)p, "fcoethread/%d", cpu); + thread = kthread_create_on_node(fcoe_percpu_receive_thread, + (void *)p, cpu_to_node(cpu), + "fcoethread/%d", cpu);
if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); @@@ -1502,7 -1518,7 +1502,7 @@@ int fcoe_xmit(struct fc_lport *lport, s return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) + cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) + frag->page_offset; } else { cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); @@@ -1523,13 -1539,7 +1523,13 @@@ skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = fcoe->netdev; + if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && + fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { + skb->vlan_tci = VLAN_TAG_PRESENT | + vlan_dev_vlan_id(fcoe->netdev); + skb->dev = fcoe->realdev; + } else + skb->dev = fcoe->netdev;
/* fill up mac and fcoe headers */ eh = eth_hdr(skb); @@@ -2036,7 -2046,7 +2036,7 @@@ int fcoe_link_speed_update(struct fc_lp struct net_device *netdev = fcoe_netdev(lport); struct ethtool_cmd ecmd;
- if (!dev_ethtool_get_settings(netdev, &ecmd)) { + if (!__ethtool_get_settings(netdev, &ecmd)) { lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); if (ecmd.supported & (SUPPORTED_1000baseT_Half | @@@ -2437,7 -2447,7 +2437,7 @@@ static int fcoe_vport_create(struct fc_
rc = fcoe_validate_vport_create(vport); if (rc) { - wwn_to_str(vport->port_name, buf, sizeof(buf)); + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); printk(KERN_ERR "fcoe: Failed to create vport, " "WWPN (0x%s) already exists\n", buf); @@@ -2445,7 -2455,9 +2445,9 @@@ }
mutex_lock(&fcoe_config_mutex); + rtnl_lock(); vn_port = fcoe_if_create(fcoe, &vport->dev, 1); + rtnl_unlock(); mutex_unlock(&fcoe_config_mutex);
if (IS_ERR(vn_port)) { @@@ -2544,9 -2556,28 +2546,9 @@@ static void fcoe_set_vport_symbolic_nam static void fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb) { - unsigned int cpu; - u32 lfc, vlfc, mdac; - struct fcoe_dev_stats *devst; - struct fcoe_fc_els_lesb *lesb; - struct rtnl_link_stats64 temp; struct net_device *netdev = fcoe_netdev(lport);
- lfc = 0; - vlfc = 0; - mdac = 0; - lesb = (struct fcoe_fc_els_lesb *)fc_lesb; - memset(lesb, 0, sizeof(*lesb)); - for_each_possible_cpu(cpu) { - devst = per_cpu_ptr(lport->dev_stats, cpu); - lfc += devst->LinkFailureCount; - vlfc += devst->VLinkFailureCount; - mdac += devst->MissDiscAdvCount; - } - lesb->lesb_link_fail = htonl(lfc); - lesb->lesb_vlink_fail = htonl(vlfc); - lesb->lesb_miss_fka = htonl(mdac); - lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); + __fcoe_get_lesb(lport, fc_lesb, netdev); }
/** @@@ -2570,3 -2601,49 +2572,3 @@@ static void fcoe_set_port_id(struct fc_ if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); } - -/** - * fcoe_validate_vport_create() - Validate a vport before creating it - * @vport: NPIV port to be created - * - * This routine is meant to add validation for a vport before creating it - * via fcoe_vport_create(). - * Current validations are: - * - WWPN supplied is unique for given lport - * - * -*/ -static int fcoe_validate_vport_create(struct fc_vport *vport) -{ - struct Scsi_Host *shost = vport_to_shost(vport); - struct fc_lport *n_port = shost_priv(shost); - struct fc_lport *vn_port; - int rc = 0; - char buf[32]; - - mutex_lock(&n_port->lp_mutex); - - wwn_to_str(vport->port_name, buf, sizeof(buf)); - /* Check if the wwpn is not same as that of the lport */ - if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { - FCOE_DBG("vport WWPN 0x%s is same as that of the " - "base port WWPN\n", buf); - rc = -EINVAL; - goto out; - } - - /* Check if there is any existing vport with same wwpn */ - list_for_each_entry(vn_port, &n_port->vports, list) { - if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { - FCOE_DBG("vport with given WWPN 0x%s already " - "exists\n", buf); - rc = -EINVAL; - break; - } - } - -out: - mutex_unlock(&n_port->lp_mutex); - - return rc; -} diff --combined drivers/scsi/fcoe/fcoe_transport.c index 7264d0d,dac8e39..bd97b22 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@@ -83,107 -83,6 +83,107 @@@ static struct notifier_block libfcoe_no .notifier_call = libfcoe_device_notification, };
+void __fcoe_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb, + struct net_device *netdev) +{ + unsigned int cpu; + u32 lfc, vlfc, mdac; + struct fcoe_dev_stats *devst; + struct fcoe_fc_els_lesb *lesb; + struct rtnl_link_stats64 temp; + + lfc = 0; + vlfc = 0; + mdac = 0; + lesb = (struct fcoe_fc_els_lesb *)fc_lesb; + memset(lesb, 0, sizeof(*lesb)); + for_each_possible_cpu(cpu) { + devst = per_cpu_ptr(lport->dev_stats, cpu); + lfc += devst->LinkFailureCount; + vlfc += devst->VLinkFailureCount; + mdac += devst->MissDiscAdvCount; + } + lesb->lesb_link_fail = htonl(lfc); + lesb->lesb_vlink_fail = htonl(vlfc); + lesb->lesb_miss_fka = htonl(mdac); + lesb->lesb_fcs_error = + htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); +} +EXPORT_SYMBOL_GPL(__fcoe_get_lesb); + +void fcoe_wwn_to_str(u64 wwn, char *buf, int len) +{ + u8 wwpn[8]; + + u64_to_wwn(wwn, wwpn); + snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x", + wwpn[0], wwpn[1], wwpn[2], wwpn[3], + wwpn[4], wwpn[5], wwpn[6], wwpn[7]); +} +EXPORT_SYMBOL_GPL(fcoe_wwn_to_str); + +/** + * fcoe_validate_vport_create() - Validate a vport before creating it + * @vport: NPIV port to be created + * + * This routine is meant to add validation for a vport before creating it + * via fcoe_vport_create(). + * Current validations are: + * - WWPN supplied is unique for given lport + */ +int fcoe_validate_vport_create(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + int rc = 0; + char buf[32]; + + mutex_lock(&n_port->lp_mutex); + + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + /* Check if the wwpn is not same as that of the lport */ + if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { + LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the " + "base port WWPN\n", buf); + rc = -EINVAL; + goto out; + } + + /* Check if there is any existing vport with same wwpn */ + list_for_each_entry(vn_port, &n_port->vports, list) { + if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { + LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s " + "already exists\n", buf); + rc = -EINVAL; + break; + } + } +out: + mutex_unlock(&n_port->lp_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(fcoe_validate_vport_create); + +/** + * fcoe_get_wwn() - Get the world wide name from LLD if it supports it + * @netdev: the associated net device + * @wwn: the output WWN + * @type: the type of WWN (WWPN or WWNN) + * + * Returns: 0 for success + */ +int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + const struct net_device_ops *ops = netdev->netdev_ops; + + if (ops->ndo_fcoe_get_wwn) + return ops->ndo_fcoe_get_wwn(netdev, wwn, type); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(fcoe_get_wwn); + /** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed @@@ -206,11 -105,12 +206,12 @@@ u32 fcoe_fc_crc(struct fc_frame *fp for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; off = frag->page_offset; - len = frag->size; + len = skb_frag_size(frag); while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), - KM_SKB_DATA_SOFTIRQ); + data = kmap_atomic( + skb_frag_page(frag) + (off >> PAGE_SHIFT), + KM_SKB_DATA_SOFTIRQ); crc = crc32(crc, data + (off & ~PAGE_MASK), clen); kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); off += clen; diff --combined drivers/staging/Kconfig index e9aa688,2582e18..ff26324 --- 
a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@@ -32,6 -32,8 +32,6 @@@ source "drivers/staging/go7007/Kconfig
source "drivers/staging/cx25821/Kconfig"
-source "drivers/staging/tm6000/Kconfig" - source "drivers/staging/cxd2099/Kconfig"
source "drivers/staging/usbip/Kconfig" @@@ -42,7 -44,7 +42,7 @@@ source "drivers/staging/wlan-ng/Kconfig
source "drivers/staging/echo/Kconfig"
- source "drivers/staging/brcm80211/Kconfig" +
source "drivers/staging/comedi/Kconfig"
@@@ -124,8 -126,6 +124,6 @@@ source "drivers/staging/quickstart/Kcon
source "drivers/staging/sbe-2t3e3/Kconfig"
- source "drivers/staging/ath6kl/Kconfig" - source "drivers/staging/keucr/Kconfig"
source "drivers/staging/bcm/Kconfig" @@@ -142,6 -142,8 +140,6 @@@ source "drivers/staging/ste_rmi4/Kconfi
source "drivers/staging/gma500/Kconfig"
-source "drivers/staging/altera-stapl/Kconfig" - source "drivers/staging/mei/Kconfig"
source "drivers/staging/nvec/Kconfig" diff --combined drivers/staging/Makefile index a2e77cf,50d112f..7f23f02 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@@ -7,14 -7,15 +7,14 @@@ obj-$(CONFIG_ET131X) += et131x obj-$(CONFIG_SLICOSS) += slicoss/ obj-$(CONFIG_VIDEO_GO7007) += go7007/ obj-$(CONFIG_VIDEO_CX25821) += cx25821/ -obj-$(CONFIG_VIDEO_TM6000) += tm6000/ obj-$(CONFIG_DVB_CXD2099) += cxd2099/ obj-$(CONFIG_LIRC_STAGING) += lirc/ obj-$(CONFIG_USBIP_CORE) += usbip/ obj-$(CONFIG_W35UND) += winbond/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_ECHO) += echo/ - obj-$(CONFIG_BRCMSMAC) += brcm80211/ - obj-$(CONFIG_BRCMFMAC) += brcm80211/ + + obj-$(CONFIG_COMEDI) += comedi/ obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ obj-$(CONFIG_ASUS_OLED) += asus_oled/ @@@ -53,12 -54,12 +53,11 @@@ obj-$(CONFIG_SOLO6X10) += solo6x10 obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/ obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/ obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/ - obj-$(CONFIG_ATH6K_LEGACY) += ath6kl/ obj-$(CONFIG_USB_ENESTORAGE) += keucr/ obj-$(CONFIG_BCM_WIMAX) += bcm/ obj-$(CONFIG_FT1000) += ft1000/ obj-$(CONFIG_SND_INTEL_SST) += intel_sst/ obj-$(CONFIG_SPEAKUP) += speakup/ -obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/ obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/ obj-$(CONFIG_DRM_PSB) += gma500/ diff --combined include/linux/mlx4/device.h index 9ab0f6a,2366f94..87cd634 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@@ -61,7 -61,6 +61,7 @@@ enum MLX4_DEV_CAP_FLAG_RC = 1LL << 0, MLX4_DEV_CAP_FLAG_UC = 1LL << 1, MLX4_DEV_CAP_FLAG_UD = 1LL << 2, + MLX4_DEV_CAP_FLAG_XRC = 1LL << 3, MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, @@@ -76,6 -75,7 +76,7 @@@ MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21, MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, + MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, @@@ -257,8 -257,6 +258,8 @@@ struct mlx4_caps int num_qp_per_mgm; int num_pds; int reserved_pds; + int max_xrcds; + int reserved_xrcds; int mtt_entry_sz; u32 max_msg_sz; u32 page_size_cap; @@@ -502,8 -500,6 +503,8 @@@ static inline void *mlx4_buf_offset(str
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); @@@ -543,8 -539,8 +544,8 @@@ void mlx4_qp_release_range(struct mlx4_ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, - u64 db_rec, struct mlx4_srq *srq); +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); diff --combined include/linux/pci.h index 4f62042,f1b1ca1..89f6e9d --- a/include/linux/pci.h +++ b/include/linux/pci.h @@@ -174,6 -174,8 +174,8 @@@ enum pci_dev_flags PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, /* Device configuration is irrevocably lost if disabled into D3 */ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, + /* Provide indication device is assigned by a Virtual Machine Manager */ + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, };
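The pci_dev_flags addition just above gives virtualization code a way to tag a physical device that has been handed to a guest. Purely as an illustration of the intended use, and not code from this merge, the flag lives in pci_dev->dev_flags alongside the existing quirk flags; the helper names below are invented for this sketch:

#include <linux/pci.h>

/* Hypothetical helpers: mark and query a device assigned to a VM guest. */
static void example_pci_mark_assigned(struct pci_dev *pdev)
{
        pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}

static bool example_pci_is_assigned(struct pci_dev *pdev)
{
        return !!(pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED);
}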
enum pci_irq_reroute_variant { @@@ -273,7 -275,6 +275,7 @@@ struct pci_dev unsigned int pme_support:5; /* Bitmask of states from which PME# can be generated */ unsigned int pme_interrupt:1; + unsigned int pme_poll:1; /* Poll device's PME status bit */ unsigned int d1_support:1; /* Low power state D1 is supported */ unsigned int d2_support:1; /* Low power state D2 is supported */ unsigned int no_d1d2:1; /* Only allow D0 and D3 */ @@@ -792,11 -793,8 +794,11 @@@ static inline int pci_is_managed(struc }
void pci_disable_device(struct pci_dev *dev); + +extern unsigned int pcibios_max_latency; void pci_set_master(struct pci_dev *dev); void pci_clear_master(struct pci_dev *dev); + int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pci_set_cacheline_size(struct pci_dev *dev); #define HAVE_PCI_SET_MWI @@@ -959,7 -957,6 +961,7 @@@ void pci_walk_bus(struct pci_bus *top, int pci_cfg_space_size_ext(struct pci_dev *dev); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); +void pci_setup_bridge(struct pci_bus *bus);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@@ -1446,10 -1443,8 +1448,10 @@@ extern u8 pci_cache_line_size extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mem_size;
+/* Architecture specific versions may override these (weak) */ int pcibios_add_platform_entries(struct pci_dev *dev); void pcibios_disable_device(struct pci_dev *dev); +void pcibios_set_master(struct pci_dev *dev); int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
diff --combined include/linux/skbuff.h index 0f96646,94a23ff..6a6b352 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -29,6 -29,7 +29,7 @@@ #include <linux/rcupdate.h> #include <linux/dmaengine.h> #include <linux/hrtimer.h> + #include <linux/dma-mapping.h>
/* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 @@@ -45,6 -46,11 +46,11 @@@ #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
+ /* return minimum truesize of one skb containing X bytes of data */ + #define SKB_TRUESIZE(X) ((X) + \ + SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + /* A. Checksumming of received packets by device. * * NONE: device failed to checksum this packet. @@@ -134,7 -140,9 +140,9 @@@ struct sk_buff typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct { - struct page *page; + struct { + struct page *p; + } page; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) __u32 page_offset; __u32 size; @@@ -144,6 -152,26 +152,26 @@@ #endif };
+ static inline unsigned int skb_frag_size(const skb_frag_t *frag) + { + return frag->size; + } + + static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) + { + frag->size = size; + } + + static inline void skb_frag_size_add(skb_frag_t *frag, int delta) + { + frag->size += delta; + } + + static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) + { + frag->size -= delta; + } + #define HAVE_HW_TIME_STAMP
/** @@@ -322,6 -350,8 +350,8 @@@ typedef unsigned char *sk_buff_data_t * @queue_mapping: Queue mapping for multiqueue devices * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed + * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport + * ports. * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@@ -414,6 -444,7 +444,7 @@@ struct sk_buff __u8 ndisc_nodetype:2; #endif __u8 ooo_okay:1; + __u8 l4_rxhash:1; kmemcheck_bitfield_end(flags2);
/* 0/13 bit hole */ @@@ -521,6 -552,7 +552,7 @@@ static inline struct sk_buff *alloc_skb return __alloc_skb(size, priority, 1, NUMA_NO_NODE); }
+ extern void skb_recycle(struct sk_buff *skb); extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); @@@ -573,11 -605,11 +605,11 @@@ extern unsigned int skb_find_text(str unsigned int to, struct ts_config *config, struct ts_state *state);
- extern __u32 __skb_get_rxhash(struct sk_buff *skb); + extern void __skb_get_rxhash(struct sk_buff *skb); static inline __u32 skb_get_rxhash(struct sk_buff *skb) { if (!skb->rxhash) - skb->rxhash = __skb_get_rxhash(skb); + __skb_get_rxhash(skb);
return skb->rxhash; } @@@ -823,9 -855,9 +855,9 @@@ static inline struct sk_buff *skb_unsha * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */ - static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) + static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) { - struct sk_buff *list = ((struct sk_buff *)list_)->next; + struct sk_buff *list = ((const struct sk_buff *)list_)->next; if (list == (struct sk_buff *)list_) list = NULL; return list; @@@ -844,9 -876,9 +876,9 @@@ * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */ - static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) + static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) { - struct sk_buff *list = ((struct sk_buff *)list_)->prev; + struct sk_buff *list = ((const struct sk_buff *)list_)->prev; if (list == (struct sk_buff *)list_) list = NULL; return list; @@@ -1123,18 -1155,51 +1155,51 @@@ static inline int skb_pagelen(const str int i, len = 0;
for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) - len += skb_shinfo(skb)->frags[i].size; + len += skb_frag_size(&skb_shinfo(skb)->frags[i]); return len + skb_headlen(skb); }
- static inline void skb_fill_page_desc(struct sk_buff *skb, int i, - struct page *page, int off, int size) + /** + * __skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * Initialises the @i'th fragment of @skb to point to &size bytes at + * offset @off within @page. + * + * Does not take any additional reference on the fragment. + */ + static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page; + frag->page.p = page; frag->page_offset = off; - frag->size = size; + skb_frag_size_set(frag, size); + } + + /** + * skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * As per __skb_fill_page_desc() -- initialises the @i'th fragment of + * @skb to point to &size bytes at offset @off within @page. In + * addition updates @skb such that @i is the last fragment. + * + * Does not take any additional reference on the fragment. + */ + static inline void skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) + { + __skb_fill_page_desc(skb, i, page, off, size); skb_shinfo(skb)->nr_frags = i + 1; }
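With the fragment page now hidden behind page.p, drivers are expected to fill paged fragments through the helpers documented above rather than writing the struct fields directly. A minimal sketch of that pattern, with an invented function name and an assumed receive-style caller:

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Illustrative only: attach one page of payload to an skb as a paged
 * fragment using skb_fill_page_desc(), then fix up the length fields. */
static int example_attach_page(struct sk_buff *skb, unsigned int len)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;

        /* The helper stores the page and size and bumps nr_frags. */
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, len);

        skb->len += len;
        skb->data_len += len;
        skb->truesize += PAGE_SIZE;
        return 0;
}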
@@@ -1629,6 -1694,137 +1694,137 @@@ static inline void netdev_free_page(str }
/** + * skb_frag_page - retrieve the page refered to by a paged fragment + * @frag: the paged fragment + * + * Returns the &struct page associated with @frag. + */ + static inline struct page *skb_frag_page(const skb_frag_t *frag) + { + return frag->page.p; + } + + /** + * __skb_frag_ref - take an addition reference on a paged fragment. + * @frag: the paged fragment + * + * Takes an additional reference on the paged fragment @frag. + */ + static inline void __skb_frag_ref(skb_frag_t *frag) + { + get_page(skb_frag_page(frag)); + } + + /** + * skb_frag_ref - take an addition reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset. + * + * Takes an additional reference on the @f'th paged fragment of @skb. + */ + static inline void skb_frag_ref(struct sk_buff *skb, int f) + { + __skb_frag_ref(&skb_shinfo(skb)->frags[f]); + } + + /** + * __skb_frag_unref - release a reference on a paged fragment. + * @frag: the paged fragment + * + * Releases a reference on the paged fragment @frag. + */ + static inline void __skb_frag_unref(skb_frag_t *frag) + { + put_page(skb_frag_page(frag)); + } + + /** + * skb_frag_unref - release a reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset + * + * Releases a reference on the @f'th paged fragment of @skb. + */ + static inline void skb_frag_unref(struct sk_buff *skb, int f) + { + __skb_frag_unref(&skb_shinfo(skb)->frags[f]); + } + + /** + * skb_frag_address - gets the address of the data contained in a paged fragment + * @frag: the paged fragment buffer + * + * Returns the address of the data within @frag. The page must already + * be mapped. + */ + static inline void *skb_frag_address(const skb_frag_t *frag) + { + return page_address(skb_frag_page(frag)) + frag->page_offset; + } + + /** + * skb_frag_address_safe - gets the address of the data contained in a paged fragment + * @frag: the paged fragment buffer + * + * Returns the address of the data within @frag. Checks that the page + * is mapped and returns %NULL otherwise. + */ + static inline void *skb_frag_address_safe(const skb_frag_t *frag) + { + void *ptr = page_address(skb_frag_page(frag)); + if (unlikely(!ptr)) + return NULL; + + return ptr + frag->page_offset; + } + + /** + * __skb_frag_set_page - sets the page contained in a paged fragment + * @frag: the paged fragment + * @page: the page to set + * + * Sets the fragment @frag to contain @page. + */ + static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) + { + frag->page.p = page; + } + + /** + * skb_frag_set_page - sets the page contained in a paged fragment of an skb + * @skb: the buffer + * @f: the fragment offset + * @page: the page to set + * + * Sets the @f'th fragment of @skb to contain @page. + */ + static inline void skb_frag_set_page(struct sk_buff *skb, int f, + struct page *page) + { + __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); + } + + /** + * skb_frag_dma_map - maps a paged fragment via the DMA API + * @device: the device to map the fragment to + * @frag: the paged fragment to map + * @offset: the offset within the fragment (starting at the + * fragment's own offset) + * @size: the number of bytes to map + * @direction: the direction of the mapping (%PCI_DMA_*) + * + * Maps the page associated with @frag to @device. 
+ */ + static inline dma_addr_t skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) + { + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); + } + + /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check * @len: length up to which to write @@@ -1636,7 -1832,7 +1832,7 @@@ * Returns true if modifying the header part of the cloned buffer * does not requires the data to be copied. */ - static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len) + static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) { return !skb_header_cloned(skb) && skb_headroom(skb) + len <= skb->hdr_len; @@@ -1730,13 -1926,13 +1926,13 @@@ static inline int skb_add_data(struct s }
static inline int skb_can_coalesce(struct sk_buff *skb, int i, - struct page *page, int off) + const struct page *page, int off) { if (i) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == frag->page && - off == frag->page_offset + frag->size; + return page == skb_frag_page(frag) && + off == frag->page_offset + skb_frag_size(frag); } return 0; } @@@ -2020,13 -2216,8 +2216,13 @@@ static inline bool skb_defer_rx_timesta /** * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps * + * PHY drivers may accept clones of transmitted packets for + * timestamping via their phy_driver.txtstamp method. These drivers + * must call this function to return the skb back to the stack, with + * or without a timestamp. + * * @skb: clone of the the original outgoing packet - * @hwtstamps: hardware time stamps + * @hwtstamps: hardware time stamps, may be NULL if not available * */ void skb_complete_tx_timestamp(struct sk_buff *skb, @@@ -2262,7 -2453,8 +2458,8 @@@ static inline bool skb_warn_if_lro(cons { /* LRO sets gso_size but not gso_type, whereas if GSO is really * wanted then gso_type will be set. */ - struct skb_shared_info *shinfo = skb_shinfo(skb); + const struct skb_shared_info *shinfo = skb_shinfo(skb); + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { __skb_warn_lro_forwarding(skb); @@@ -2286,7 -2478,7 +2483,7 @@@ static inline void skb_forward_csum(str * Instead of forcing ip_summed to CHECKSUM_NONE, we can * use this helper, to document places where we make this assertion. */ - static inline void skb_checksum_none_assert(struct sk_buff *skb) + static inline void skb_checksum_none_assert(const struct sk_buff *skb) { #ifdef DEBUG BUG_ON(skb->ip_summed != CHECKSUM_NONE); @@@ -2295,5 -2487,25 +2492,25 @@@
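The conversions in the cxgbi, fcoe and dev.c hunks of this merge all follow the same pattern: read fragments through skb_frag_page() and skb_frag_size() and map them with skb_frag_dma_map() instead of dereferencing frag->page and frag->size. A hedged transmit-side sketch (the device pointer, the address array and the function name are assumptions of the example, not code from the merge):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Illustrative only: DMA-map every paged fragment of an skb for TX. */
static int example_map_tx_frags(struct device *dev, struct sk_buff *skb,
                                dma_addr_t *addrs)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                addrs[i] = skb_frag_dma_map(dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addrs[i]))
                        return -ENOMEM; /* caller unwinds earlier mappings */
        }
        return 0;
}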
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) + { + if (irqs_disabled()) + return false; + + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) + return false; + + if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) + return false; + + skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); + if (skb_end_pointer(skb) - skb->head < skb_size) + return false; + + if (skb_shared(skb) || skb_cloned(skb)) + return false; + + return true; + } #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --combined kernel/sys.c index 1dbbe69,b3dfb76..5845950 --- a/kernel/sys.c +++ b/kernel/sys.c @@@ -1172,7 -1172,7 +1172,7 @@@ DECLARE_RWSEM(uts_sem) static int override_release(char __user *release, int len) { int ret = 0; - char buf[len]; + char buf[65];
if (current->personality & UNAME26) { char *rest = UTS_RELEASE; @@@ -1759,6 -1759,7 +1759,7 @@@ SYSCALL_DEFINE5(prctl, int, option, uns sizeof(me->comm) - 1) < 0) return -EFAULT; set_task_comm(me, comm); + proc_comm_connector(me); return 0; case PR_GET_NAME: get_task_comm(comm, me); diff --combined net/batman-adv/translation-table.c index ef1acfd,cc53f78..873fb3d --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -183,7 -183,8 +183,8 @@@ static int tt_local_init(struct bat_pri return 1; }
- void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) + void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex) { struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; @@@ -207,6 -208,8 +208,8 @@@ memcpy(tt_local_entry->addr, addr, ETH_ALEN); tt_local_entry->last_seen = jiffies; tt_local_entry->flags = NO_FLAGS; + if (is_wifi_iface(ifindex)) + tt_local_entry->flags |= TT_CLIENT_WIFI; atomic_set(&tt_local_entry->refcount, 2);
/* the batman interface mac address should never be purged */ @@@ -329,7 -332,7 +332,7 @@@ int tt_local_seq_print_text(struct seq_
rcu_read_lock(); __hlist_for_each_rcu(node, head) - buf_size += 21; + buf_size += 29; rcu_read_unlock(); }
@@@ -348,8 -351,19 +351,19 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 22, " * %pM\n", - tt_local_entry->addr); + pos += snprintf(buff + pos, 30, " * %pM " + "[%c%c%c%c%c]\n", + tt_local_entry->addr, + (tt_local_entry->flags & + TT_CLIENT_ROAM ? 'R' : '.'), + (tt_local_entry->flags & + TT_CLIENT_NOPURGE ? 'P' : '.'), + (tt_local_entry->flags & + TT_CLIENT_NEW ? 'N' : '.'), + (tt_local_entry->flags & + TT_CLIENT_PENDING ? 'X' : '.'), + (tt_local_entry->flags & + TT_CLIENT_WIFI ? 'W' : '.')); } rcu_read_unlock(); } @@@ -369,8 -383,8 +383,8 @@@ static void tt_local_set_pending(struc tt_local_event(bat_priv, tt_local_entry->addr, tt_local_entry->flags | flags);
- /* The local client has to be merked as "pending to be removed" but has - * to be kept in the table in order to send it in an full tables + /* The local client has to be marked as "pending to be removed" but has + * to be kept in the table in order to send it in a full table * response issued before the net ttvn increment (consistency check) */ tt_local_entry->flags |= TT_CLIENT_PENDING; } @@@ -495,7 -509,8 +509,8 @@@ static void tt_changes_list_free(struc
/* caller must hold orig_node refcount */ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_addr, uint8_t ttvn, bool roaming) + const unsigned char *tt_addr, uint8_t ttvn, bool roaming, + bool wifi) { struct tt_global_entry *tt_global_entry; struct orig_node *orig_node_tmp; @@@ -537,6 -552,9 +552,9 @@@ tt_global_entry->roam_at = 0; }
+ if (wifi) + tt_global_entry->flags |= TT_CLIENT_WIFI; + bat_dbg(DBG_TT, bat_priv, "Creating new global tt entry: %pM (via %pM)\n", tt_global_entry->addr, orig_node->orig); @@@ -582,8 -600,8 +600,8 @@@ int tt_global_seq_print_text(struct seq seq_printf(seq, "Globally announced TT entries received via the mesh %s\n", net_dev->name); - seq_printf(seq, " %-13s %s %-15s %s\n", - "Client", "(TTVN)", "Originator", "(Curr TTVN)"); + seq_printf(seq, " %-13s %s %-15s %s %s\n", + "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
buf_size = 1; /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via @@@ -593,7 -611,7 +611,7 @@@
rcu_read_lock(); __hlist_for_each_rcu(node, head) - buf_size += 59; + buf_size += 67; rcu_read_unlock(); }
@@@ -612,14 -630,20 +630,20 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 61, - " * %pM (%3u) via %pM (%3u)\n", - tt_global_entry->addr, + pos += snprintf(buff + pos, 69, + " * %pM (%3u) via %pM (%3u) " + "[%c%c%c]\n", tt_global_entry->addr, tt_global_entry->ttvn, tt_global_entry->orig_node->orig, (uint8_t) atomic_read( &tt_global_entry->orig_node-> - last_ttvn)); + last_ttvn), + (tt_global_entry->flags & + TT_CLIENT_ROAM ? 'R' : '.'), + (tt_global_entry->flags & + TT_CLIENT_PENDING ? 'X' : '.'), + (tt_global_entry->flags & + TT_CLIENT_WIFI ? 'W' : '.')); } rcu_read_unlock(); } @@@ -774,30 -798,56 +798,56 @@@ static void tt_global_table_free(struc bat_priv->tt_global_hash = NULL; }
+ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry, + struct tt_global_entry *tt_global_entry) + { + bool ret = false; + + if (tt_local_entry->flags & TT_CLIENT_WIFI && + tt_global_entry->flags & TT_CLIENT_WIFI) + ret = true; + + return ret; + } + struct orig_node *transtable_search(struct bat_priv *bat_priv, - const uint8_t *addr) + const uint8_t *src, const uint8_t *addr) { - struct tt_global_entry *tt_global_entry; + struct tt_local_entry *tt_local_entry = NULL; + struct tt_global_entry *tt_global_entry = NULL; struct orig_node *orig_node = NULL;
- tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (src && atomic_read(&bat_priv->ap_isolation)) { + tt_local_entry = tt_local_hash_find(bat_priv, src); + if (!tt_local_entry) + goto out; + }
+ tt_global_entry = tt_global_hash_find(bat_priv, addr); if (!tt_global_entry) goto out;
+ /* check whether the clients should not communicate due to AP + * isolation */ + if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) + goto out; + if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) - goto free_tt; + goto out;
/* A global client marked as PENDING has already moved from that * originator */ if (tt_global_entry->flags & TT_CLIENT_PENDING) - goto free_tt; + goto out;
orig_node = tt_global_entry->orig_node;
- free_tt: - tt_global_entry_free_ref(tt_global_entry); out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + return orig_node; }
@@@ -999,6 -1049,7 +1049,6 @@@ static struct sk_buff *tt_response_fill tt_response = (struct tt_query_packet *)skb_put(skb, tt_query_size + tt_len); tt_response->ttvn = ttvn; - tt_response->tt_data = htons(tt_tot);
tt_change = (struct tt_change *)(skb->data + tt_query_size); tt_count = 0; @@@ -1024,16 -1075,13 +1074,17 @@@ } rcu_read_unlock();
+ /* store in the message the number of entries we have successfully + * copied */ + tt_response->tt_data = htons(tt_count); + out: return skb; }
- int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node, - uint8_t ttvn, uint16_t tt_crc, bool full_table) + static int send_tt_request(struct bat_priv *bat_priv, + struct orig_node *dst_orig_node, + uint8_t ttvn, uint16_t tt_crc, bool full_table) { struct sk_buff *skb = NULL; struct tt_query_packet *tt_request; @@@ -1140,12 -1188,12 +1191,12 @@@ static bool send_other_tt_response(stru orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); req_ttvn = tt_request->ttvn;
- /* I have not the requested data */ + /* I don't have the requested data */ if (orig_ttvn != req_ttvn || tt_request->tt_data != req_dst_orig_node->tt_crc) goto out;
- /* If it has explicitly been requested the full table */ + /* If the full table has been explicitly requested */ if (tt_request->flags & TT_FULL_TABLE || !req_dst_orig_node->tt_buff) full_table = true; @@@ -1366,7 -1414,9 +1417,9 @@@ static void _tt_update_changes(struct b (tt_change + i)->flags & TT_CLIENT_ROAM); else if (!tt_global_add(bat_priv, orig_node, - (tt_change + i)->addr, ttvn, false)) + (tt_change + i)->addr, ttvn, false, + (tt_change + i)->flags & + TT_CLIENT_WIFI)) /* In case of problem while storing a * global_entry, we stop the updating * procedure without committing the @@@ -1406,9 -1456,10 +1459,10 @@@ out orig_node_free_ref(orig_node); }
- void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node, - uint16_t tt_num_changes, uint8_t ttvn, - struct tt_change *tt_change) + static void tt_update_changes(struct bat_priv *bat_priv, + struct orig_node *orig_node, + uint16_t tt_num_changes, uint8_t ttvn, + struct tt_change *tt_change) { _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, ttvn); @@@ -1671,8 -1722,6 +1725,8 @@@ static void tt_local_reset_flags(struc rcu_read_lock(); hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { + if (!(tt_local_entry->flags & flags)) + continue; tt_local_entry->flags &= ~flags; atomic_inc(&bat_priv->num_local_tt); } @@@ -1725,3 -1774,90 +1779,90 @@@ void tt_commit_changes(struct bat_priv atomic_inc(&bat_priv->ttvn); bat_priv->tt_poss_change = false; } + + bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) + { + struct tt_local_entry *tt_local_entry = NULL; + struct tt_global_entry *tt_global_entry = NULL; + bool ret = true; + + if (!atomic_read(&bat_priv->ap_isolation)) + return false; + + tt_local_entry = tt_local_hash_find(bat_priv, dst); + if (!tt_local_entry) + goto out; + + tt_global_entry = tt_global_hash_find(bat_priv, src); + if (!tt_global_entry) + goto out; + + if (_is_ap_isolated(tt_local_entry, tt_global_entry)) + goto out; + + ret = false; + + out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + return ret; + } + + void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, uint16_t tt_crc) + { + uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + bool full_table = true; + + /* the ttvn increased by one -> we can apply the attached changes */ + if (ttvn - orig_ttvn == 1) { + /* the OGM could not contain the changes due to their size or + * because they have already been sent TT_OGM_APPEND_MAX times. + * In this case send a tt request */ + if (!tt_num_changes) { + full_table = false; + goto request_table; + } + + tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, + (struct tt_change *)tt_buff); + + /* Even if we received the precomputed crc with the OGM, we + * prefer to recompute it to spot any possible inconsistency + * in the global table */ + orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); + + /* The ttvn alone is not enough to guarantee consistency + * because a single value could represent different states + * (due to the wrap around). Thus a node has to check whether + * the resulting table (after applying the changes) is still + * consistent or not. E.g. a node could disconnect while its + * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case + * checking the CRC value is mandatory to detect the + * inconsistency */ + if (orig_node->tt_crc != tt_crc) + goto request_table; + + /* Roaming phase is over: tables are in sync again. I can + * unset the flag */ + orig_node->tt_poss_change = false; + } else { + /* if we missed more than one change or our tables are not + * in sync anymore -> request fresh tt data */ + if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { + request_table: + bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. 
" + "Need to retrieve the correct information " + "(ttvn: %u last_ttvn: %u crc: %u last_crc: " + "%u num_changes: %u)\n", orig_node->orig, ttvn, + orig_ttvn, tt_crc, orig_node->tt_crc, + tt_num_changes); + send_tt_request(bat_priv, orig_node, ttvn, tt_crc, + full_table); + return; + } + } + } diff --combined net/bridge/br_if.c index 1d420f6,c3b77dc..f603e5b --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@@ -13,6 -13,7 +13,7 @@@
#include <linux/kernel.h> #include <linux/netdevice.h> + #include <linux/etherdevice.h> #include <linux/netpoll.h> #include <linux/ethtool.h> #include <linux/if_arp.h> @@@ -33,20 -34,18 +34,18 @@@ */ static int port_cost(struct net_device *dev) { - if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { - struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; - - if (!dev_ethtool_get_settings(dev, &ecmd)) { - switch (ethtool_cmd_speed(&ecmd)) { - case SPEED_10000: - return 2; - case SPEED_1000: - return 4; - case SPEED_100: - return 19; - case SPEED_10: - return 100; - } + struct ethtool_cmd ecmd; + + if (!__ethtool_get_settings(dev, &ecmd)) { + switch (ethtool_cmd_speed(&ecmd)) { + case SPEED_10000: + return 2; + case SPEED_1000: + return 4; + case SPEED_100: + return 19; + case SPEED_10: + return 100; } }
@@@ -161,10 -160,9 +160,10 @@@ static void del_nbp(struct net_bridge_p call_rcu(&p->rcu, destroy_nbp_rcu); }
-/* called with RTNL */ -static void del_br(struct net_bridge *br, struct list_head *head) +/* Delete bridge device */ +void br_dev_delete(struct net_device *dev, struct list_head *head) { + struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) { @@@ -269,7 -267,7 +268,7 @@@ int br_del_bridge(struct net *net, cons }
else - del_br(netdev_priv(dev), NULL); + br_dev_delete(dev, NULL);
rtnl_unlock(); return ret; @@@ -325,7 -323,8 +324,8 @@@ int br_add_if(struct net_bridge *br, st
/* Don't allow bridging non-ethernet like devices */ if ((dev->flags & IFF_LOOPBACK) || - dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN) + dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN || + !is_valid_ether_addr(dev->dev_addr)) return -EINVAL;
/* No bridging of bridges */ @@@ -353,10 -352,6 +353,6 @@@ err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), SYSFS_BRIDGE_PORT_ATTR); if (err) - goto err0; - - err = br_fdb_insert(br, p, dev->dev_addr); - if (err) goto err1;
err = br_sysfs_addif(p); @@@ -397,6 -392,9 +393,9 @@@
dev_set_mtu(br->dev, br_min_mtu(br));
+ if (br_fdb_insert(br, p, dev->dev_addr)) + netdev_err(dev, "failed insert local address bridge forwarding table\n"); + kobject_uevent(&p->kobj, KOBJ_ADD);
return 0; @@@ -406,11 -404,9 +405,9 @@@ err4 err3: sysfs_remove_link(br->ifobj, p->dev->name); err2: - br_fdb_delete_by_port(br, p, 1); - err1: kobject_put(&p->kobj); p = NULL; /* kobject_put frees */ - err0: + err1: dev_set_promiscuity(dev, -1); put_back: dev_put(dev); @@@ -450,7 -446,7 +447,7 @@@ void __net_exit br_net_exit(struct net rtnl_lock(); for_each_netdev(net, dev) if (dev->priv_flags & IFF_EBRIDGE) - del_br(netdev_priv(dev), &list); + br_dev_delete(dev, &list);
unregister_netdevice_many(&list); rtnl_unlock(); diff --combined net/bridge/br_private.h index 857a021,a248fe6..d7d6fb0 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@@ -29,6 -29,11 +29,11 @@@
#define BR_VERSION "2.3"
+ /* Control of forwarding link local multicast */ + #define BR_GROUPFWD_DEFAULT 0 + /* Don't allow forwarding control protocols like STP and LLDP */ + #define BR_GROUPFWD_RESTRICTED 0x4007u + /* Path to usermode spanning tree program */ #define BR_STP_PROG "/sbin/bridge-stp"
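BR_GROUPFWD_RESTRICTED is presumably a bitmask over the last byte of the 01:80:C2:00:00:0X link-local group addresses: 0x4007 keeps bits 0 (STP), 1 (MAC pause), 2 (slow protocols/LACP) and 14 (LLDP) permanently blocked, matching the comment above. A rough sketch of the kind of test such a mask implies, not the bridge's actual receive path, assuming the BR_GROUPFWD_* definitions from the hunk above are in scope:

#include <linux/etherdevice.h>

/* Illustrative only: may a 01:80:C2:00:00:0X frame be forwarded under the
 * given per-bridge mask? The restricted bits always stay blocked. */
static bool example_may_forward_link_local(const unsigned char *dest,
                                           u16 group_fwd_mask)
{
        u16 allowed = group_fwd_mask & ~BR_GROUPFWD_RESTRICTED;

        return !!(allowed & (1u << (dest[ETH_ALEN - 1] & 0xf)));
}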
@@@ -193,6 -198,8 +198,8 @@@ struct net_bridg unsigned long flags; #define BR_SET_MAC_ADDR 0x00000001
+ u16 group_fwd_mask; + /* STP */ bridge_id designated_root; bridge_id bridge_id; @@@ -294,7 -301,6 +301,7 @@@ static inline int br_is_root_bridge(con
/* br_device.c */ extern void br_dev_setup(struct net_device *dev); +extern void br_dev_delete(struct net_device *dev, struct list_head *list); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --combined net/core/dev.c index ae5cf2d,ad5d702..b7ba81a --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -133,6 -133,10 +133,10 @@@ #include <linux/pci.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> + #include <linux/if_tunnel.h> + #include <linux/if_pppox.h> + #include <linux/ppp_defs.h> + #include <linux/net_tstamp.h>
#include "net-sysfs.h"
@@@ -1474,6 -1478,57 +1478,57 @@@ static inline void net_timestamp_check( __net_timestamp(skb); }
+ static int net_hwtstamp_validate(struct ifreq *ifr) + { + struct hwtstamp_config cfg; + enum hwtstamp_tx_types tx_type; + enum hwtstamp_rx_filters rx_filter; + int tx_type_valid = 0; + int rx_filter_valid = 0; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) /* reserved for future extensions */ + return -EINVAL; + + tx_type = cfg.tx_type; + rx_filter = cfg.rx_filter; + + switch (tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_ONESTEP_SYNC: + tx_type_valid = 1; + break; + } + + switch (rx_filter) { + case HWTSTAMP_FILTER_NONE: + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + rx_filter_valid = 1; + break; + } + + if (!tx_type_valid || !rx_filter_valid) + return -ERANGE; + + return 0; + } + static inline bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) { @@@ -1955,9 -2010,11 +2010,11 @@@ static int illegal_highdma(struct net_d #ifdef CONFIG_HIGHMEM int i; if (!(dev->features & NETIF_F_HIGHDMA)) { - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - if (PageHighMem(skb_shinfo(skb)->frags[i].page)) + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + if (PageHighMem(skb_frag_page(frag))) return 1; + } }
if (PCI_DMA_BUS_IS_PHYS) { @@@ -1966,7 -2023,8 +2023,8 @@@ if (!pdev) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t addr = page_to_phys(skb_frag_page(frag)); if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) return 1; } @@@ -2527,25 -2585,31 +2585,31 @@@ static inline void ____napi_schedule(st
/* * __skb_get_rxhash: calculate a flow hash based on src/dst addresses - * and src/dst port numbers. Returns a non-zero hash number on success - * and 0 on failure. + * and src/dst port numbers. Sets rxhash in skb to non-zero hash value + * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb + * if hash is a canonical 4-tuple hash over transport ports. */ - __u32 __skb_get_rxhash(struct sk_buff *skb) + void __skb_get_rxhash(struct sk_buff *skb) { int nhoff, hash = 0, poff; const struct ipv6hdr *ip6; const struct iphdr *ip; + const struct vlan_hdr *vlan; u8 ip_proto; - u32 addr1, addr2, ihl; + u32 addr1, addr2; + u16 proto; union { u32 v32; u16 v16[2]; } ports;
nhoff = skb_network_offset(skb); + proto = skb->protocol;
- switch (skb->protocol) { + again: + switch (proto) { case __constant_htons(ETH_P_IP): + ip: if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) goto done;
@@@ -2556,9 -2620,10 +2620,10 @@@ ip_proto = ip->protocol; addr1 = (__force u32) ip->saddr; addr2 = (__force u32) ip->daddr; - ihl = ip->ihl; + nhoff += ip->ihl * 4; break; case __constant_htons(ETH_P_IPV6): + ipv6: if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) goto done;
@@@ -2566,20 -2631,71 +2631,71 @@@ ip_proto = ip6->nexthdr; addr1 = (__force u32) ip6->saddr.s6_addr32[3]; addr2 = (__force u32) ip6->daddr.s6_addr32[3]; - ihl = (40 >> 2); + nhoff += 40; break; + case __constant_htons(ETH_P_8021Q): + if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff)) + goto done; + vlan = (const struct vlan_hdr *) (skb->data + nhoff); + proto = vlan->h_vlan_encapsulated_proto; + nhoff += sizeof(*vlan); + goto again; + case __constant_htons(ETH_P_PPP_SES): + if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff)) + goto done; + proto = *((__be16 *) (skb->data + nhoff + + sizeof(struct pppoe_hdr))); + nhoff += PPPOE_SES_HLEN; + switch (proto) { + case __constant_htons(PPP_IP): + goto ip; + case __constant_htons(PPP_IPV6): + goto ipv6; + default: + goto done; + } default: goto done; }
+ switch (ip_proto) { + case IPPROTO_GRE: + if (pskb_may_pull(skb, nhoff + 16)) { + u8 *h = skb->data + nhoff; + __be16 flags = *(__be16 *)h; + + /* + * Only look inside GRE if version zero and no + * routing + */ + if (!(flags & (GRE_VERSION|GRE_ROUTING))) { + proto = *(__be16 *)(h + 2); + nhoff += 4; + if (flags & GRE_CSUM) + nhoff += 4; + if (flags & GRE_KEY) + nhoff += 4; + if (flags & GRE_SEQ) + nhoff += 4; + goto again; + } + } + break; + case IPPROTO_IPIP: + goto again; + default: + break; + } + ports.v32 = 0; poff = proto_ports_offset(ip_proto); if (poff >= 0) { - nhoff += ihl * 4 + poff; + nhoff += poff; if (pskb_may_pull(skb, nhoff + 4)) { ports.v32 = * (__force u32 *) (skb->data + nhoff); if (ports.v16[1] < ports.v16[0]) swap(ports.v16[0], ports.v16[1]); + skb->l4_rxhash = 1; } }
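The encapsulation cases added above (802.1Q, PPPoE session, GRE, IPIP) all reduce to advancing nhoff past the outer header and re-running the protocol switch. The GRE arithmetic is the least obvious part, so here is a small standalone sketch of the same header walk over a plain byte buffer; the helper name and the buffer contents in main() are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htons(), ntohs() */

/* GRE flag bits (network byte order), same values as linux/if_tunnel.h. */
#define GRE_CSUM        htons(0x8000)
#define GRE_ROUTING     htons(0x4000)
#define GRE_KEY         htons(0x2000)
#define GRE_SEQ         htons(0x1000)
#define GRE_VERSION     htons(0x0007)

/*
 * Return the offset of the encapsulated packet inside a GRE header, or -1
 * when the header is not one the rxhash code would look into (non-zero
 * version, routing present, or the buffer is too short).
 */
static int gre_inner_offset(const uint8_t *buf, size_t len, uint16_t *inner_proto)
{
        uint16_t flags;
        int off = 4;            /* flags + protocol */

        if (len < 4)
                return -1;
        memcpy(&flags, buf, 2);
        memcpy(inner_proto, buf + 2, 2);

        if (flags & (GRE_VERSION | GRE_ROUTING))
                return -1;

        if (flags & GRE_CSUM)
                off += 4;       /* checksum + reserved */
        if (flags & GRE_KEY)
                off += 4;
        if (flags & GRE_SEQ)
                off += 4;

        return (size_t)off <= len ? off : -1;
}

int main(void)
{
        /* Made-up header: key bit set, inner protocol 0x0800 (IPv4). */
        uint8_t hdr[8] = { 0x20, 0x00, 0x08, 0x00, 0xde, 0xad, 0xbe, 0xef };
        uint16_t proto = 0;
        int off = gre_inner_offset(hdr, sizeof(hdr), &proto);

        printf("inner offset=%d proto=0x%04x\n", off, ntohs(proto));
        return 0;
}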
@@@ -2592,7 -2708,7 +2708,7 @@@ hash = 1;
done: - return hash; + skb->rxhash = hash; } EXPORT_SYMBOL(__skb_get_rxhash);
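With this change __skb_get_rxhash() writes its result into skb->rxhash itself and sets l4_rxhash when transport ports were included. The property the swap() above preserves is that both directions of a connection hash to the same value. A minimal userspace sketch of that canonicalisation, using a placeholder mixing function where the kernel uses a boot-time-seeded jhash:

#include <stdint.h>
#include <stdio.h>

/* Stand-in mixer; only the canonical ordering is the point here. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
        uint32_t h = a * 0x9e3779b1u;

        h ^= b + 0x85ebca77u + (h << 6) + (h >> 2);
        h ^= c + 0xc2b2ae3du + (h << 6) + (h >> 2);
        return h;
}

/* Direction-independent hash of an IPv4/port 4-tuple. */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport)
{
        uint32_t addr1 = saddr, addr2 = daddr;
        uint16_t port1 = sport, port2 = dport;
        uint32_t tmp, hash;

        if (addr2 < addr1) {            /* canonical address order */
                tmp = addr1; addr1 = addr2; addr2 = tmp;
        }
        if (port2 < port1) {            /* canonical port order */
                uint16_t t = port1; port1 = port2; port2 = t;
        }

        hash = mix3(addr1, addr2, ((uint32_t)port1 << 16) | port2);
        return hash ? hash : 1;         /* 0 means "no valid hash" */
}

int main(void)
{
        /* Both directions of the same flow print the same value. */
        printf("%08x\n", flow_hash(0x0a000001, 0x0a000002, 12345, 80));
        printf("%08x\n", flow_hash(0x0a000002, 0x0a000001, 80, 12345));
        return 0;
}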
@@@ -2606,10 -2722,7 +2722,7 @@@ static struct rps_dev_flow set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) { - u16 tcpu; - - tcpu = rflow->cpu = next_cpu; - if (tcpu != RPS_NO_CPU) { + if (next_cpu != RPS_NO_CPU) { #ifdef CONFIG_RFS_ACCEL struct netdev_rx_queue *rxqueue; struct rps_dev_flow_table *flow_table; @@@ -2637,16 -2750,16 +2750,16 @@@ goto out; old_rflow = rflow; rflow = &flow_table->flows[flow_id]; - rflow->cpu = next_cpu; rflow->filter = rc; if (old_rflow->filter == rflow->filter) old_rflow->filter = RPS_NO_FILTER; out: #endif rflow->last_qtail = - per_cpu(softnet_data, tcpu).input_queue_head; + per_cpu(softnet_data, next_cpu).input_queue_head; }
+ rflow->cpu = next_cpu; return rflow; }
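set_rps_cpu() now assigns rflow->cpu once at the end instead of up front and again inside the flow-table branch. This is internal RFS bookkeeping and only comes into play once RFS is enabled from userspace; the sketch below writes the documented knobs, with the device name, queue index and table sizes all chosen purely for illustration.

#include <stdio.h>

static int write_knob(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");     /* needs sufficient privileges */

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        fclose(f);
        return 0;
}

int main(void)
{
        /* Global socket-flow table (shared by all devices). */
        write_knob("/proc/sys/net/core/rps_sock_flow_entries", "32768");
        /* Per-RX-queue flow count; device and queue are assumptions. */
        write_knob("/sys/class/net/eth0/queues/rx-0/rps_flow_cnt", "2048");
        return 0;
}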
@@@ -2681,13 -2794,13 +2794,13 @@@ static int get_rps_cpu(struct net_devic map = rcu_dereference(rxqueue->rps_map); if (map) { if (map->len == 1 && - !rcu_dereference_raw(rxqueue->rps_flow_table)) { + !rcu_access_pointer(rxqueue->rps_flow_table)) { tcpu = map->cpus[0]; if (cpu_online(tcpu)) cpu = tcpu; goto done; } - } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { + } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { goto done; }
@@@ -3102,8 -3215,8 +3215,8 @@@ void netdev_rx_handler_unregister(struc {
ASSERT_RTNL(); - rcu_assign_pointer(dev->rx_handler, NULL); - rcu_assign_pointer(dev->rx_handler_data, NULL); + RCU_INIT_POINTER(dev->rx_handler, NULL); + RCU_INIT_POINTER(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
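This hunk, like several others in the merge (fib_rules, auth_gss, the rcu_access_pointer() conversions further down), switches NULL stores from rcu_assign_pointer() to RCU_INIT_POINTER() and NULL tests from rcu_dereference_raw() to rcu_access_pointer(). The split is purely about what ordering is required: only publishing an initialised object needs the write barrier. A generic kernel-style sketch, with all names invented for illustration:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cfg {
        int value;
        struct rcu_head rcu;
};

static struct cfg __rcu *cur_cfg;
static DEFINE_SPINLOCK(cfg_lock);

/* Publishing an initialised object: rcu_assign_pointer() orders the
 * stores to *new before the pointer becomes visible to readers. */
static int cfg_set(int value)
{
        struct cfg *new, *old;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        new->value = value;

        spin_lock(&cfg_lock);
        old = rcu_dereference_protected(cur_cfg, lockdep_is_held(&cfg_lock));
        rcu_assign_pointer(cur_cfg, new);
        spin_unlock(&cfg_lock);

        if (old)
                kfree_rcu(old, rcu);
        return 0;
}

/* Storing NULL publishes nothing, so RCU_INIT_POINTER() is sufficient. */
static void cfg_clear(void)
{
        struct cfg *old;

        spin_lock(&cfg_lock);
        old = rcu_dereference_protected(cur_cfg, lockdep_is_held(&cfg_lock));
        RCU_INIT_POINTER(cur_cfg, NULL);
        spin_unlock(&cfg_lock);

        if (old)
                kfree_rcu(old, rcu);
}

/* rcu_access_pointer() is for tests that never dereference the result;
 * actually using the object still needs rcu_read_lock()/rcu_dereference(). */
static int cfg_get(int *value)
{
        struct cfg *c;
        int ret = -ENOENT;

        if (!rcu_access_pointer(cur_cfg))
                return ret;

        rcu_read_lock();
        c = rcu_dereference(cur_cfg);
        if (c) {
                *value = c->value;
                ret = 0;
        }
        rcu_read_unlock();
        return ret;
}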
@@@ -3170,6 -3283,17 +3283,17 @@@ another_round ncls: #endif
+ if (vlan_tx_tag_present(skb)) { + if (pt_prev) { + ret = deliver_skb(skb, pt_prev, orig_dev); + pt_prev = NULL; + } + if (vlan_do_receive(&skb)) + goto another_round; + else if (unlikely(!skb)) + goto out; + } + rx_handler = rcu_dereference(skb->dev->rx_handler); if (rx_handler) { if (pt_prev) { @@@ -3190,18 -3314,6 +3314,6 @@@ } }
- if (vlan_tx_tag_present(skb)) { - if (pt_prev) { - ret = deliver_skb(skb, pt_prev, orig_dev); - pt_prev = NULL; - } - if (vlan_do_receive(&skb)) { - ret = __netif_receive_skb(skb); - goto out; - } else if (unlikely(!skb)) - goto out; - } - /* deliver only exact match when indicated */ null_or_dev = deliver_exact ? skb->dev : NULL;
@@@ -3429,10 -3541,10 +3541,10 @@@ pull skb->data_len -= grow;
skb_shinfo(skb)->frags[0].page_offset += grow; - skb_shinfo(skb)->frags[0].size -= grow; + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
- if (unlikely(!skb_shinfo(skb)->frags[0].size)) { - put_page(skb_shinfo(skb)->frags[0].page); + if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { + skb_frag_unref(skb, 0); memmove(skb_shinfo(skb)->frags, skb_shinfo(skb)->frags + 1, --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); @@@ -3496,11 -3608,10 +3608,10 @@@ void skb_gro_reset_offset(struct sk_buf NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail && - !PageHighMem(skb_shinfo(skb)->frags[0].page)) { + !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { NAPI_GRO_CB(skb)->frag0 = - page_address(skb_shinfo(skb)->frags[0].page) + - skb_shinfo(skb)->frags[0].page_offset; - NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size; + skb_frag_address(&skb_shinfo(skb)->frags[0]); + NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); } } EXPORT_SYMBOL(skb_gro_reset_offset); @@@ -3982,6 -4093,60 +4093,60 @@@ static int dev_ifconf(struct net *net, }
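The GRO and highmem hunks above stop poking skb_frag_t fields directly and go through the accessors instead (skb_frag_page(), skb_frag_size(), skb_frag_address(), skb_frag_unref(), skb_frag_size_sub()), so the fragment layout can change without touching every caller. Code that walks an skb's paged data then takes the following shape; this fragment is illustrative and not lifted from any driver:

#include <linux/skbuff.h>

/* Sum the bytes carried in an skb's page fragments via the accessors. */
static unsigned int skb_frag_bytes(const struct sk_buff *skb)
{
        unsigned int i, total = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                total += skb_frag_size(frag);
                /* skb_frag_page(frag) or skb_frag_address(frag) would be
                 * used here to map or copy the fragment's data. */
        }
        return total;
}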
#ifdef CONFIG_PROC_FS + + #define BUCKET_SPACE (32 - NETDEV_HASHBITS) + + struct dev_iter_state { + struct seq_net_private p; + unsigned int pos; /* bucket << BUCKET_SPACE + offset */ + }; + + #define get_bucket(x) ((x) >> BUCKET_SPACE) + #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) + #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) + + static inline struct net_device *dev_from_same_bucket(struct seq_file *seq) + { + struct dev_iter_state *state = seq->private; + struct net *net = seq_file_net(seq); + struct net_device *dev; + struct hlist_node *p; + struct hlist_head *h; + unsigned int count, bucket, offset; + + bucket = get_bucket(state->pos); + offset = get_offset(state->pos); + h = &net->dev_name_head[bucket]; + count = 0; + hlist_for_each_entry_rcu(dev, p, h, name_hlist) { + if (count++ == offset) { + state->pos = set_bucket_offset(bucket, count); + return dev; + } + } + + return NULL; + } + + static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) + { + struct dev_iter_state *state = seq->private; + struct net_device *dev; + unsigned int bucket; + + bucket = get_bucket(state->pos); + do { + dev = dev_from_same_bucket(seq); + if (dev) + return dev; + + bucket++; + state->pos = set_bucket_offset(bucket, 0); + } while (bucket < NETDEV_HASHENTRIES); + + return NULL; + } + /* * This is invoked by the /proc filesystem handler to display a device * in detail. @@@ -3989,33 -4154,33 +4154,33 @@@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { - struct net *net = seq_file_net(seq); - loff_t off; - struct net_device *dev; + struct dev_iter_state *state = seq->private;
rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN;
- off = 1; - for_each_netdev_rcu(net, dev) - if (off++ == *pos) - return dev; + /* check for end of the hash */ + if (state->pos == 0 && *pos > 1) + return NULL;
- return NULL; + return dev_from_new_bucket(seq); }
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev = v; + struct net_device *dev; + + ++*pos;
if (v == SEQ_START_TOKEN) - dev = first_net_device_rcu(seq_file_net(seq)); - else - dev = next_net_device_rcu(dev); + return dev_from_new_bucket(seq);
- ++*pos; - return dev; + dev = dev_from_same_bucket(seq); + if (dev) + return dev; + + return dev_from_new_bucket(seq); }
void dev_seq_stop(struct seq_file *seq, void *v) @@@ -4114,7 -4279,7 +4279,7 @@@ static const struct seq_operations dev_ static int dev_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &dev_seq_ops, - sizeof(struct seq_net_private)); + sizeof(struct dev_iter_state)); }
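The /proc/net/dev iterator now remembers its place as a single unsigned int, packing the name-hash bucket into the top bits and the offset within that bucket into the low BUCKET_SPACE bits, so a restarted read can resume via dev_from_same_bucket()/dev_from_new_bucket(). The packing itself is easy to check in isolation; a standalone sketch using the same macros (NETDEV_HASHBITS is 8 here, making BUCKET_SPACE 24):

#include <assert.h>
#include <stdio.h>

/* Mirror of the iterator's position encoding. */
#define NETDEV_HASHBITS         8
#define NETDEV_HASHENTRIES      (1 << NETDEV_HASHBITS)
#define BUCKET_SPACE            (32 - NETDEV_HASHBITS)

#define get_bucket(x)           ((x) >> BUCKET_SPACE)
#define get_offset(x)           ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
        unsigned int bucket, offset, pos;

        for (bucket = 0; bucket < NETDEV_HASHENTRIES; bucket++) {
                for (offset = 0; offset < 4; offset++) {
                        pos = set_bucket_offset(bucket, offset);
                        assert(get_bucket(pos) == bucket);
                        assert(get_offset(pos) == offset);
                }
        }
        printf("bucket/offset round-trips for %u buckets\n", NETDEV_HASHENTRIES);
        return 0;
}

With 24 offset bits per bucket, aliasing would need more than 16 million interfaces hashed into one of the 256 buckets, so the packed position is effectively unambiguous.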
static const struct file_operations dev_seq_fops = { @@@ -4497,9 -4662,7 +4662,7 @@@ void __dev_set_rx_mode(struct net_devic if (!netif_device_present(dev)) return;
- if (ops->ndo_set_rx_mode) - ops->ndo_set_rx_mode(dev); - else { + if (!(dev->priv_flags & IFF_UNICAST_FLT)) { /* Unicast addresses changes may only happen under the rtnl, * therefore calling __dev_set_promiscuity here is safe. */ @@@ -4510,10 -4673,10 +4673,10 @@@ __dev_set_promiscuity(dev, -1); dev->uc_promisc = false; } - - if (ops->ndo_set_multicast_list) - ops->ndo_set_multicast_list(dev); } + + if (ops->ndo_set_rx_mode) + ops->ndo_set_rx_mode(dev); }
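__dev_set_rx_mode() now always finishes by calling ndo_set_rx_mode() when the driver provides one, and only falls back to __dev_set_promiscuity() for devices that do not advertise IFF_UNICAST_FLT; the separate ndo_set_multicast_list hook is gone. For drivers that means one callback covers promiscuous, all-multicast and multicast-filter programming. A skeletal example of such a callback is sketched below; the foo_* helpers, the filter-slot limit and the ops wiring are all hypothetical.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Placeholder hardware hooks -- stand-ins for a real driver's register I/O. */
static void foo_hw_set_promisc(struct net_device *dev, bool on) { }
static void foo_hw_set_allmulti(struct net_device *dev, bool on) { }
static void foo_hw_add_mc_filter(struct net_device *dev, const u8 *addr) { }
static void foo_hw_clear_mc_filters(struct net_device *dev) { }

#define FOO_MC_FILTER_SLOTS     16      /* assumed hardware limit */

static void foo_set_rx_mode(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        foo_hw_set_promisc(dev, dev->flags & IFF_PROMISC);

        if ((dev->flags & IFF_ALLMULTI) ||
            netdev_mc_count(dev) > FOO_MC_FILTER_SLOTS) {
                foo_hw_set_allmulti(dev, true);
                return;
        }

        foo_hw_set_allmulti(dev, false);
        foo_hw_clear_mc_filters(dev);
        netdev_for_each_mc_addr(ha, dev)
                foo_hw_add_mc_filter(dev, ha->addr);
}

/* The driver would also set dev->priv_flags |= IFF_UNICAST_FLT if its
 * hardware can filter secondary unicast addresses, which is what lets
 * the core skip the promiscuity fallback above. */
static const struct net_device_ops foo_netdev_ops = {
        .ndo_set_rx_mode        = foo_set_rx_mode,
};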
void dev_set_rx_mode(struct net_device *dev) @@@ -4524,30 -4687,6 +4687,6 @@@ }
/** - * dev_ethtool_get_settings - call device's ethtool_ops::get_settings() - * @dev: device - * @cmd: memory area for ethtool_ops::get_settings() result - * - * The cmd arg is initialized properly (cleared and - * ethtool_cmd::cmd field set to ETHTOOL_GSET). - * - * Return device's ethtool_ops::get_settings() result value or - * -EOPNOTSUPP when device doesn't expose - * ethtool_ops::get_settings() operation. - */ - int dev_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) - { - if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; - - memset(cmd, 0, sizeof(struct ethtool_cmd)); - cmd->cmd = ETHTOOL_GSET; - return dev->ethtool_ops->get_settings(dev, cmd); - } - EXPORT_SYMBOL(dev_ethtool_get_settings); - - /** * dev_get_flags - get flags reported to userspace * @dev: device * @@@ -4863,7 -5002,7 +5002,7 @@@ static int dev_ifsioc(struct net *net, return -EOPNOTSUPP;
case SIOCADDMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) @@@ -4871,7 -5010,7 +5010,7 @@@ return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) @@@ -4888,6 -5027,12 +5027,12 @@@ ifr->ifr_newname[IFNAMSIZ-1] = '\0'; return dev_change_name(dev, ifr->ifr_newname);
+ case SIOCSHWTSTAMP: + err = net_hwtstamp_validate(ifr); + if (err) + return err; + /* fall through */ + /* * Unknown or private ioctl */ @@@ -5202,7 -5347,7 +5347,7 @@@ static void rollback_registered_many(st dev = list_first_entry(head, struct net_device, unreg_list); call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- rcu_barrier(); + synchronize_net();
list_for_each_entry(dev, head, unreg_list) dev_put(dev); @@@ -5715,6 -5860,12 +5860,12 @@@ void netdev_run_todo(void
__rtnl_unlock();
+ /* Wait for rcu callbacks to finish before attempting to drain + * the device list. This usually avoids a 250ms wait. + */ + if (!list_empty(&list)) + rcu_barrier(); + while (!list_empty(&list)) { struct net_device *dev = list_first_entry(&list, struct net_device, todo_list); @@@ -5735,8 -5886,8 +5886,8 @@@
/* paranoia */ BUG_ON(netdev_refcnt_read(dev)); - WARN_ON(rcu_dereference_raw(dev->ip_ptr)); - WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); + WARN_ON(rcu_access_pointer(dev->ip_ptr)); + WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr);
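Two related RCU changes sit in this area: rollback_registered_many() now uses synchronize_net() where it used rcu_barrier(), and netdev_run_todo() gains an rcu_barrier() before draining its list. The distinction is that synchronize_net()/synchronize_rcu() only waits for readers to leave their critical sections, while rcu_barrier() waits for already-queued call_rcu() callbacks to actually run. A generic kernel-style sketch of where each belongs (types and helpers are invented):

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct list_head list;          /* on some RCU-protected list */
        struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

/* Synchronous removal: wait for every current reader, then free. */
static void item_remove_sync(struct item *it)
{
        list_del_rcu(&it->list);
        synchronize_rcu();
        kfree(it);
}

/* Deferred removal: the free runs from a callback after the grace period. */
static void item_remove_async(struct item *it)
{
        list_del_rcu(&it->list);
        call_rcu(&it->rcu, item_free_rcu);
}

/* Teardown: rcu_barrier() waits for the deferred callbacks themselves to
 * finish, which a plain synchronize_rcu() would not guarantee. */
static void item_teardown(void)
{
        rcu_barrier();
}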
if (dev->destructor) @@@ -5940,7 -6091,7 +6091,7 @@@ void free_netdev(struct net_device *dev kfree(dev->_rx); #endif
- kfree(rcu_dereference_raw(dev->ingress_queue)); + kfree(rcu_dereference_protected(dev->ingress_queue, 1));
/* Flush device addresses */ dev_addr_flush(dev); @@@ -6115,7 -6266,6 +6266,7 @@@ int dev_change_net_namespace(struct net */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/* * Flush the unicast and multicast chains diff --combined net/core/fib_rules.c index 27071ee,38be474..57e8f95 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@@ -475,11 -475,8 +475,11 @@@ static int fib_nl_delrule(struct sk_buf
list_del_rcu(&rule->list);
- if (rule->action == FR_ACT_GOTO) + if (rule->action == FR_ACT_GOTO) { ops->nr_goto_rules--; + if (rtnl_dereference(rule->ctarget) == NULL) + ops->unresolved_rules--; + }
/* * Check if this rule is a target to any of them. If so, @@@ -490,7 -487,7 +490,7 @@@ if (ops->nr_goto_rules > 0) { list_for_each_entry(tmp, &ops->rules_list, list) { if (rtnl_dereference(tmp->ctarget) == rule) { - rcu_assign_pointer(tmp->ctarget, NULL); + RCU_INIT_POINTER(tmp->ctarget, NULL); ops->unresolved_rules++; } } @@@ -548,7 -545,7 +548,7 @@@ static int fib_nl_fill_rule(struct sk_b frh->flags = rule->flags;
if (rule->action == FR_ACT_GOTO && - rcu_dereference_raw(rule->ctarget) == NULL) + rcu_access_pointer(rule->ctarget) == NULL) frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) { diff --combined net/ipv4/route.c index 41557e2,26c77e1..1082460 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -120,7 -120,6 +120,6 @@@
static int ip_rt_max_size; static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; - static int ip_rt_gc_interval __read_mostly = 60 * HZ; static int ip_rt_gc_min_interval __read_mostly = HZ / 2; static int ip_rt_redirect_number __read_mostly = 9; static int ip_rt_redirect_load __read_mostly = HZ / 50; @@@ -324,7 -323,7 +323,7 @@@ static struct rtable *rt_cache_get_firs struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { - if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)) + if (!rcu_access_pointer(rt_hash_table[st->bucket].chain)) continue; rcu_read_lock_bh(); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); @@@ -350,7 -349,7 +349,7 @@@ static struct rtable *__rt_cache_get_ne do { if (--st->bucket < 0) return NULL; - } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)); + } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain)); rcu_read_lock_bh(); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); } @@@ -761,7 -760,7 +760,7 @@@ static void rt_do_flush(struct net *net
if (process_context && need_resched()) cond_resched(); - rth = rcu_dereference_raw(rt_hash_table[i].chain); + rth = rcu_access_pointer(rt_hash_table[i].chain); if (!rth) continue;
@@@ -1309,12 -1308,7 +1308,12 @@@ static void rt_del(unsigned hash, struc void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, __be32 saddr, struct net_device *dev) { + int s, i; struct in_device *in_dev = __in_dev_get_rcu(dev); + struct rtable *rt; + __be32 skeys[2] = { saddr, 0 }; + int ikeys[2] = { dev->ifindex, 0 }; + struct flowi4 fl4; struct inet_peer *peer; struct net *net;
@@@ -1337,34 -1331,13 +1336,34 @@@ goto reject_redirect; }
- peer = inet_getpeer_v4(daddr, 1); - if (peer) { - peer->redirect_learned.a4 = new_gw; + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = daddr; + for (s = 0; s < 2; s++) { + for (i = 0; i < 2; i++) { + fl4.flowi4_oif = ikeys[i]; + fl4.saddr = skeys[s]; + rt = __ip_route_output_key(net, &fl4); + if (IS_ERR(rt)) + continue; + + if (rt->dst.error || rt->dst.dev != dev || + rt->rt_gateway != old_gw) { + ip_rt_put(rt); + continue; + }
- inet_putpeer(peer); + if (!rt->peer) + rt_bind_peer(rt, rt->rt_dst, 1);
- atomic_inc(&__rt_peer_genid); + peer = rt->peer; + if (peer) { + peer->redirect_learned.a4 = new_gw; + atomic_inc(&__rt_peer_genid); + } + + ip_rt_put(rt); + return; + } } return;
@@@ -3147,13 -3120,6 +3146,6 @@@ static ctl_table ipv4_route_table[] = .proc_handler = proc_dointvec_jiffies, }, { - .procname = "gc_interval", - .data = &ip_rt_gc_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, - { .procname = "redirect_load", .data = &ip_rt_redirect_load, .maxlen = sizeof(int), diff --combined net/ipv4/tcp_minisocks.c index 0ce3d06,b767a95..85a2fbe --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@@ -141,7 -141,7 +141,7 @@@ tcp_timewait_state_process(struct inet_ const struct tcphdr *th) { struct tcp_options_received tmp_opt; - u8 *hash_location; + const u8 *hash_location; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); int paws_reject = 0;
@@@ -328,7 -328,6 +328,7 @@@ void tcp_time_wait(struct sock *sk, in struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
+ tw->tw_transparent = inet_sk(sk)->transparent; tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; tcptw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_snd_nxt = tp->snd_nxt; @@@ -567,7 -566,7 +567,7 @@@ struct sock *tcp_check_req(struct sock struct request_sock **prev) { struct tcp_options_received tmp_opt; - u8 *hash_location; + const u8 *hash_location; struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); diff --combined net/sunrpc/auth_gss/auth_gss.c index e9b7693,d413275..afb5655 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@@ -122,7 -122,7 +122,7 @@@ gss_cred_set_ctx(struct rpc_cred *cred if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) return; gss_get_ctx(ctx); - rcu_assign_pointer(gss_cred->gc_ctx, ctx); + RCU_INIT_POINTER(gss_cred->gc_ctx, ctx); set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); smp_mb__before_clear_bit(); clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); @@@ -603,6 -603,26 +603,6 @@@ out return err; }
-static ssize_t -gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg, - char __user *dst, size_t buflen) -{ - char *data = (char *)msg->data + msg->copied; - size_t mlen = min(msg->len, buflen); - unsigned long left; - - left = copy_to_user(dst, data, mlen); - if (left == mlen) { - msg->errno = -EFAULT; - return -EFAULT; - } - - mlen -= left; - msg->copied += mlen; - msg->errno = 0; - return mlen; -} - #define MSG_BUF_MAXSIZE 1024
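Both gss pipe_ops now point their .upcall at rpc_pipe_generic_upcall, and the flavour-local gss_pipe_upcall() above is deleted. The removed body was a plain bounded copy_to_user() that tracked progress in msg->copied; presumably the shared helper does the same job once for all pipe users. For reference, that pattern looks like this (reconstructed from the deleted code, not from the new helper itself):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

/*
 * Copy as much of the queued upcall message as fits in the reader's buffer,
 * tracking progress in msg->copied (mirrors the deleted gss_pipe_upcall()).
 */
static ssize_t generic_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
                                   char __user *dst, size_t buflen)
{
        char *data = (char *)msg->data + msg->copied;
        size_t mlen = min(msg->len, buflen);
        unsigned long left;

        left = copy_to_user(dst, data, mlen);
        if (left == mlen) {
                msg->errno = -EFAULT;
                return -EFAULT;
        }

        mlen -= left;
        msg->copied += mlen;
        msg->errno = 0;
        return mlen;
}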
static ssize_t @@@ -950,7 -970,7 +950,7 @@@ gss_destroy_nullcred(struct rpc_cred *c struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
- rcu_assign_pointer(gss_cred->gc_ctx, NULL); + RCU_INIT_POINTER(gss_cred->gc_ctx, NULL); call_rcu(&cred->cr_rcu, gss_free_cred_callback); if (ctx) gss_put_ctx(ctx); @@@ -1570,7 -1590,7 +1570,7 @@@ static const struct rpc_credops gss_nul };
static const struct rpc_pipe_ops gss_upcall_ops_v0 = { - .upcall = gss_pipe_upcall, + .upcall = rpc_pipe_generic_upcall, .downcall = gss_pipe_downcall, .destroy_msg = gss_pipe_destroy_msg, .open_pipe = gss_pipe_open_v0, @@@ -1578,7 -1598,7 +1578,7 @@@ };
static const struct rpc_pipe_ops gss_upcall_ops_v1 = { - .upcall = gss_pipe_upcall, + .upcall = rpc_pipe_generic_upcall, .downcall = gss_pipe_downcall, .destroy_msg = gss_pipe_destroy_msg, .open_pipe = gss_pipe_open_v1,