
Linux/drivers/net/tg3.c

/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
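
/* Illustrative usage (a sketch, using example flag names from this
 * driver): at call sites the accessors read as predicates over the
 * token-pasted flag name, e.g.
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *              tg3_flag_set(tp, JUMBO_RING_ENABLE);
 *
 * which expands to test_bit(TG3_FLAG_JUMBO_CAPABLE, ...) and
 * set_bit(TG3_FLAG_JUMBO_RING_ENABLE, ...) respectively.
 */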

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
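
/* Worked example: TG3_TX_RING_SIZE is 512, so the mask is 511 and
 * NEXT_TX(510) == 511 while NEXT_TX(511) == 0.  The AND performs the
 * "% TG3_TX_RING_SIZE" wrap with a single mask instruction, which is
 * exactly the shift-and-mask optimization the comment above describes.
 */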

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
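
/* A minimal sketch (not the driver's actual receive code) of how such a
 * threshold is typically consumed on the rx path: short frames are copied
 * into a fresh skb so the mapped DMA buffer can be recycled immediately,
 * while longer frames are unmapped and handed to the stack whole.
 *
 *      if (len < TG3_RX_COPY_THRESH(tp)) {
 *              ... copy into a small skb, leave the ring buffer in place ...
 *      } else {
 *              ... unmap the producer buffer and pass it up directly ...
 *      }
 */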

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
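
/* Worked example: with tx_pending at its default of
 * TG3_DEF_TX_RING_PENDING (511), the tx queue is woken once
 * 511 / 4 = 127 descriptors are free again (integer division).
 */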

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
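
/* The debug mask is an ordinary module parameter, so (assuming the
 * in-tree module name) it can be set at load time, e.g.
 *
 *      modprobe tg3 tg3_debug=0x3
 *
 * which enables only NETIF_MSG_DRV (0x1) and NETIF_MSG_PROBE (0x2).
 */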

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
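
/* Usage note: tw32() is a plain posted write, tw32_f() reads the register
 * back to flush the write, and tw32_wait_f() additionally enforces a delay
 * for registers where an immediate read-back is unsafe, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as tg3_switch_clocks() does below.
 */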

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
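
/* Illustrative pairing (a sketch, not code from this file): callers take
 * an APE lock around state the APE firmware may also touch, e.g.
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... access the shared resource ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 */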

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
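
/* Both MII helpers return 0 on success and -EBUSY on timeout, so callers
 * conventionally negate the result, e.g. (illustrative):
 *
 *      u32 reg;
 *
 *      if (!tg3_readphy(tp, MII_BMSR, &reg) && (reg & BMSR_LSTATUS))
 *              ... link is up ...
 */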

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;
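        /* Worked example: with the full TG3_FW_EVENT_TIMEOUT_USEC budget
         * of 2500 usec, delay_cnt is (2500 >> 3) + 1 = 313 iterations of
         * udelay(8), i.e. just over the 2.5 ms timeout.
         */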

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
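
/* For reference, the resolution above follows the usual 802.3 pause
 * autonegotiation truth table over the PAUSE/ASM_DIR bits:
 *
 *      local PAUSE,     remote PAUSE           -> TX and RX pause
 *      local PAUSE+ASM, remote ASM only        -> RX pause only
 *      local ASM only,  remote PAUSE+ASM       -> TX pause only
 *      anything else                           -> no pause
 */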
1444 
1445 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1446 {
1447         u8 autoneg;
1448         u8 flowctrl = 0;
1449         u32 old_rx_mode = tp->rx_mode;
1450         u32 old_tx_mode = tp->tx_mode;
1451 
1452         if (tg3_flag(tp, USE_PHYLIB))
1453                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1454         else
1455                 autoneg = tp->link_config.autoneg;
1456 
1457         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1458                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1459                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1460                 else
1461                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1462         } else
1463                 flowctrl = tp->link_config.flowctrl;
1464 
1465         tp->link_config.active_flowctrl = flowctrl;
1466 
1467         if (flowctrl & FLOW_CTRL_RX)
1468                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1469         else
1470                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1471 
1472         if (old_rx_mode != tp->rx_mode)
1473                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1474 
1475         if (flowctrl & FLOW_CTRL_TX)
1476                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1477         else
1478                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1479 
1480         if (old_tx_mode != tp->tx_mode)
1481                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1482 }
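
Note the change-detection pattern above: MAC_RX_MODE and MAC_TX_MODE are
rewritten only when the computed value differs from the cached old_rx_mode
or old_tx_mode copy, and the write goes through tw32_f() (a posted write
followed by a flushing read-back), so autonegotiation events that do not
change the resolved flow control cost no MMIO round-trips.
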
1483 
1484 static void tg3_adjust_link(struct net_device *dev)
1485 {
1486         u8 oldflowctrl, linkmesg = 0;
1487         u32 mac_mode, lcl_adv, rmt_adv;
1488         struct tg3 *tp = netdev_priv(dev);
1489         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1490 
1491         spin_lock_bh(&tp->lock);
1492 
1493         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1494                                     MAC_MODE_HALF_DUPLEX);
1495 
1496         oldflowctrl = tp->link_config.active_flowctrl;
1497 
1498         if (phydev->link) {
1499                 lcl_adv = 0;
1500                 rmt_adv = 0;
1501 
1502                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1503                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1504                 else if (phydev->speed == SPEED_1000 ||
1505                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1506                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1507                 else
1508                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1509 
1510                 if (phydev->duplex == DUPLEX_HALF)
1511                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1512                 else {
1513                         lcl_adv = tg3_advert_flowctrl_1000T(
1514                                   tp->link_config.flowctrl);
1515 
1516                         if (phydev->pause)
1517                                 rmt_adv = LPA_PAUSE_CAP;
1518                         if (phydev->asym_pause)
1519                                 rmt_adv |= LPA_PAUSE_ASYM;
1520                 }
1521 
1522                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1523         } else
1524                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1525 
1526         if (mac_mode != tp->mac_mode) {
1527                 tp->mac_mode = mac_mode;
1528                 tw32_f(MAC_MODE, tp->mac_mode);
1529                 udelay(40);
1530         }
1531 
1532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1533                 if (phydev->speed == SPEED_10)
1534                         tw32(MAC_MI_STAT,
1535                              MAC_MI_STAT_10MBPS_MODE |
1536                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1537                 else
1538                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1539         }
1540 
1541         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1542                 tw32(MAC_TX_LENGTHS,
1543                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1544                       (6 << TX_LENGTHS_IPG_SHIFT) |
1545                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1546         else
1547                 tw32(MAC_TX_LENGTHS,
1548                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1549                       (6 << TX_LENGTHS_IPG_SHIFT) |
1550                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1551 
1552         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1553             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1554             phydev->speed != tp->link_config.active_speed ||
1555             phydev->duplex != tp->link_config.active_duplex ||
1556             oldflowctrl != tp->link_config.active_flowctrl)
1557                 linkmesg = 1;
1558 
1559         tp->link_config.active_speed = phydev->speed;
1560         tp->link_config.active_duplex = phydev->duplex;
1561 
1562         spin_unlock_bh(&tp->lock);
1563 
1564         if (linkmesg)
1565                 tg3_link_report(tp);
1566 }
1567 
1568 static int tg3_phy_init(struct tg3 *tp)
1569 {
1570         struct phy_device *phydev;
1571 
1572         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1573                 return 0;
1574 
1575         /* Bring the PHY back to a known state. */
1576         tg3_bmcr_reset(tp);
1577 
1578         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1579 
1580         /* Attach the MAC to the PHY. */
1581         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1582                              phydev->dev_flags, phydev->interface);
1583         if (IS_ERR(phydev)) {
1584                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1585                 return PTR_ERR(phydev);
1586         }
1587 
1588         /* Mask with MAC supported features. */
1589         switch (phydev->interface) {
1590         case PHY_INTERFACE_MODE_GMII:
1591         case PHY_INTERFACE_MODE_RGMII:
1592                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1593                         phydev->supported &= (PHY_GBIT_FEATURES |
1594                                               SUPPORTED_Pause |
1595                                               SUPPORTED_Asym_Pause);
1596                         break;
1597                 }
1598                 /* fallthru */
1599         case PHY_INTERFACE_MODE_MII:
1600                 phydev->supported &= (PHY_BASIC_FEATURES |
1601                                       SUPPORTED_Pause |
1602                                       SUPPORTED_Asym_Pause);
1603                 break;
1604         default:
1605                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1606                 return -EINVAL;
1607         }
1608 
1609         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1610 
1611         phydev->advertising = phydev->supported;
1612 
1613         return 0;
1614 }
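
One phylib detail worth noting: the PHY_GBIT_FEATURES and PHY_BASIC_FEATURES
masks do not include the pause bits, which is why SUPPORTED_Pause and
SUPPORTED_Asym_Pause are OR'd back in above.  Masking with the bare feature
set would silently stop phylib from autonegotiating flow control.
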
1615 
1616 static void tg3_phy_start(struct tg3 *tp)
1617 {
1618         struct phy_device *phydev;
1619 
1620         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1621                 return;
1622 
1623         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1624 
1625         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1626                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1627                 phydev->speed = tp->link_config.orig_speed;
1628                 phydev->duplex = tp->link_config.orig_duplex;
1629                 phydev->autoneg = tp->link_config.orig_autoneg;
1630                 phydev->advertising = tp->link_config.orig_advertising;
1631         }
1632 
1633         phy_start(phydev);
1634 
1635         phy_start_aneg(phydev);
1636 }
1637 
1638 static void tg3_phy_stop(struct tg3 *tp)
1639 {
1640         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1641                 return;
1642 
1643         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1644 }
1645 
1646 static void tg3_phy_fini(struct tg3 *tp)
1647 {
1648         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1649                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1650                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1651         }
1652 }
1653 
1654 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1655 {
1656         u32 phytest;
1657 
1658         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1659                 u32 phy;
1660 
1661                 tg3_writephy(tp, MII_TG3_FET_TEST,
1662                              phytest | MII_TG3_FET_SHADOW_EN);
1663                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1664                         if (enable)
1665                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666                         else
1667                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1668                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1669                 }
1670                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1671         }
1672 }
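
This function and tg3_phy_toggle_automdix() below repeat the same FET
shadow-register sequence: set MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST,
read-modify-write the shadowed register, then restore MII_TG3_FET_TEST.
A hypothetical helper capturing the pattern might look like the sketch
below (tg3_fet_shdw_rmw() does not exist in the driver; it is shown only
to make the sequence explicit):

    static int tg3_fet_shdw_rmw(struct tg3 *tp, u32 reg, u32 clr, u32 set)
    {
            u32 phytest, val;
            int err = -EBUSY;

            if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                    /* Expose the shadow register bank. */
                    tg3_writephy(tp, MII_TG3_FET_TEST,
                                 phytest | MII_TG3_FET_SHADOW_EN);
                    if (!tg3_readphy(tp, reg, &val)) {
                            val = (val & ~clr) | set;
                            tg3_writephy(tp, reg, val);
                            err = 0;
                    }
                    /* Always restore the test register. */
                    tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
            }
            return err;
    }

With such a helper, the body above would collapse to a single call:
tg3_fet_shdw_rmw(tp, MII_TG3_FET_SHDW_AUXSTAT2, MII_TG3_FET_SHDW_AUXSTAT2_APD,
enable ? MII_TG3_FET_SHDW_AUXSTAT2_APD : 0).
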
1673 
1674 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1675 {
1676         u32 reg;
1677 
1678         if (!tg3_flag(tp, 5705_PLUS) ||
1679             (tg3_flag(tp, 5717_PLUS) &&
1680              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1681                 return;
1682 
1683         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1684                 tg3_phy_fet_toggle_apd(tp, enable);
1685                 return;
1686         }
1687 
1688         reg = MII_TG3_MISC_SHDW_WREN |
1689               MII_TG3_MISC_SHDW_SCR5_SEL |
1690               MII_TG3_MISC_SHDW_SCR5_LPED |
1691               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1692               MII_TG3_MISC_SHDW_SCR5_SDTL |
1693               MII_TG3_MISC_SHDW_SCR5_C125OE;
1694         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1695                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1696 
1697         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1698 
1699 
1700         reg = MII_TG3_MISC_SHDW_WREN |
1701               MII_TG3_MISC_SHDW_APD_SEL |
1702               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1703         if (enable)
1704                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1705 
1706         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1707 }
1708 
1709 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1710 {
1711         u32 phy;
1712 
1713         if (!tg3_flag(tp, 5705_PLUS) ||
1714             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1715                 return;
1716 
1717         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1718                 u32 ephy;
1719 
1720                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1721                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1722 
1723                         tg3_writephy(tp, MII_TG3_FET_TEST,
1724                                      ephy | MII_TG3_FET_SHADOW_EN);
1725                         if (!tg3_readphy(tp, reg, &phy)) {
1726                                 if (enable)
1727                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728                                 else
1729                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1730                                 tg3_writephy(tp, reg, phy);
1731                         }
1732                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1733                 }
1734         } else {
1735                 int ret;
1736 
1737                 ret = tg3_phy_auxctl_read(tp,
1738                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1739                 if (!ret) {
1740                         if (enable)
1741                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742                         else
1743                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1744                         tg3_phy_auxctl_write(tp,
1745                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1746                 }
1747         }
1748 }
1749 
1750 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1751 {
1752         int ret;
1753         u32 val;
1754 
1755         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1756                 return;
1757 
1758         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1759         if (!ret)
1760                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1761                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1762 }
1763 
1764 static void tg3_phy_apply_otp(struct tg3 *tp)
1765 {
1766         u32 otp, phy;
1767 
1768         if (!tp->phy_otp)
1769                 return;
1770 
1771         otp = tp->phy_otp;
1772 
1773         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1774                 return;
1775 
1776         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1777         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1778         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1779 
1780         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1781               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1782         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1783 
1784         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1785         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1786         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1787 
1788         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1789         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1790 
1791         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1793 
1794         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1795               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1796         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1797 
1798         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1799 }
1800 
1801 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1802 {
1803         u32 val;
1804 
1805         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1806                 return;
1807 
1808         tp->setlpicnt = 0;
1809 
1810         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1811             current_link_up == 1 &&
1812             tp->link_config.active_duplex == DUPLEX_FULL &&
1813             (tp->link_config.active_speed == SPEED_100 ||
1814              tp->link_config.active_speed == SPEED_1000)) {
1815                 u32 eeectl;
1816 
1817                 if (tp->link_config.active_speed == SPEED_1000)
1818                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1819                 else
1820                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1821 
1822                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1823 
1824                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1825                                   TG3_CL45_D7_EEERES_STAT, &val);
1826 
1827                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1828                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1829                         tp->setlpicnt = 2;
1830         }
1831 
1832         if (!tp->setlpicnt) {
1833                 val = tr32(TG3_CPMU_EEE_MODE);
1834                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1835         }
1836 }
1837 
1838 static void tg3_phy_eee_enable(struct tg3 *tp)
1839 {
1840         u32 val;
1841 
1842         if (tp->link_config.active_speed == SPEED_1000 &&
1843             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1844              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1846             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1847                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1848                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1849         }
1850 
1851         val = tr32(TG3_CPMU_EEE_MODE);
1852         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1853 }
1854 
1855 static int tg3_wait_macro_done(struct tg3 *tp)
1856 {
1857         int limit = 100;
1858 
1859         while (limit--) {
1860                 u32 tmp32;
1861 
1862                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1863                         if ((tmp32 & 0x1000) == 0)
1864                                 break;
1865                 }
1866         }
1867         if (limit < 0)
1868                 return -EBUSY;
1869 
1870         return 0;
1871 }
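
Two subtleties in tg3_wait_macro_done(): the post-decrement in
"while (limit--)" leaves limit at -1 only when all 100 polls are exhausted
(a successful break leaves it >= 0, which is what the "limit < 0" test
relies on), and there is no udelay() in the loop body, so the timeout is
bounded by 100 MDIO read transactions rather than by an explicit
wall-clock delay.
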
1872 
1873 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1874 {
1875         static const u32 test_pat[4][6] = {
1876         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1877         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1878         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1879         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1880         };
1881         int chan;
1882 
1883         for (chan = 0; chan < 4; chan++) {
1884                 int i;
1885 
1886                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1887                              (chan * 0x2000) | 0x0200);
1888                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1889 
1890                 for (i = 0; i < 6; i++)
1891                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1892                                      test_pat[chan][i]);
1893 
1894                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1895                 if (tg3_wait_macro_done(tp)) {
1896                         *resetp = 1;
1897                         return -EBUSY;
1898                 }
1899 
1900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1901                              (chan * 0x2000) | 0x0200);
1902                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1903                 if (tg3_wait_macro_done(tp)) {
1904                         *resetp = 1;
1905                         return -EBUSY;
1906                 }
1907 
1908                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1909                 if (tg3_wait_macro_done(tp)) {
1910                         *resetp = 1;
1911                         return -EBUSY;
1912                 }
1913 
1914                 for (i = 0; i < 6; i += 2) {
1915                         u32 low, high;
1916 
1917                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1918                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1919                             tg3_wait_macro_done(tp)) {
1920                                 *resetp = 1;
1921                                 return -EBUSY;
1922                         }
1923                         low &= 0x7fff;
1924                         high &= 0x000f;
1925                         if (low != test_pat[chan][i] ||
1926                             high != test_pat[chan][i+1]) {
1927                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1928                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1929                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1930 
1931                                 return -EBUSY;
1932                         }
1933                 }
1934         }
1935 
1936         return 0;
1937 }
1938 
1939 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1940 {
1941         int chan;
1942 
1943         for (chan = 0; chan < 4; chan++) {
1944                 int i;
1945 
1946                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1947                              (chan * 0x2000) | 0x0200);
1948                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1949                 for (i = 0; i < 6; i++)
1950                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1952                 if (tg3_wait_macro_done(tp))
1953                         return -EBUSY;
1954         }
1955 
1956         return 0;
1957 }
1958 
1959 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1960 {
1961         u32 reg32, phy9_orig;
1962         int retries, do_phy_reset, err;
1963 
1964         retries = 10;
1965         do_phy_reset = 1;
1966         do {
1967                 if (do_phy_reset) {
1968                         err = tg3_bmcr_reset(tp);
1969                         if (err)
1970                                 return err;
1971                         do_phy_reset = 0;
1972                 }
1973 
1974                 /* Disable transmitter and interrupt.  */
1975                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1976                         continue;
1977 
1978                 reg32 |= 0x3000;
1979                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1980 
1981                 /* Set full-duplex, 1000 Mbps.  */
1982                 tg3_writephy(tp, MII_BMCR,
1983                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1984 
1985                 /* Set to master mode.  */
1986                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1987                         continue;
1988 
1989                 tg3_writephy(tp, MII_TG3_CTRL,
1990                              (MII_TG3_CTRL_AS_MASTER |
1991                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1992 
1993                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1994                 if (err)
1995                         return err;
1996 
1997                 /* Block the PHY control access.  */
1998                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1999 
2000                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2001                 if (!err)
2002                         break;
2003         } while (--retries);
2004 
2005         err = tg3_phy_reset_chanpat(tp);
2006         if (err)
2007                 return err;
2008 
2009         tg3_phydsp_write(tp, 0x8005, 0x0000);
2010 
2011         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2012         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2013 
2014         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2015 
2016         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2017 
2018         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2019                 reg32 &= ~0x3000;
2020                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2021         } else if (!err)
2022                 err = -EBUSY;
2023 
2024         return err;
2025 }
2026 
2027 /* Reset the tigon3 PHY back to a known state and reapply the
2028  * chip-specific workaround settings.
2029  */
2030 static int tg3_phy_reset(struct tg3 *tp)
2031 {
2032         u32 val, cpmuctrl;
2033         int err;
2034 
2035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2036                 val = tr32(GRC_MISC_CFG);
2037                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2038                 udelay(40);
2039         }
2040         err  = tg3_readphy(tp, MII_BMSR, &val);
2041         err |= tg3_readphy(tp, MII_BMSR, &val);
2042         if (err != 0)
2043                 return -EBUSY;
2044 
2045         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2046                 netif_carrier_off(tp->dev);
2047                 tg3_link_report(tp);
2048         }
2049 
2050         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2053                 err = tg3_phy_reset_5703_4_5(tp);
2054                 if (err)
2055                         return err;
2056                 goto out;
2057         }
2058 
2059         cpmuctrl = 0;
2060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2061             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2062                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2063                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2064                         tw32(TG3_CPMU_CTRL,
2065                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2066         }
2067 
2068         err = tg3_bmcr_reset(tp);
2069         if (err)
2070                 return err;
2071 
2072         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2073                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2074                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2075 
2076                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2077         }
2078 
2079         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2080             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2081                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2082                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2083                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2084                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2085                         udelay(40);
2086                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2087                 }
2088         }
2089 
2090         if (tg3_flag(tp, 5717_PLUS) &&
2091             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2092                 return 0;
2093 
2094         tg3_phy_apply_otp(tp);
2095 
2096         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2097                 tg3_phy_toggle_apd(tp, true);
2098         else
2099                 tg3_phy_toggle_apd(tp, false);
2100 
2101 out:
2102         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2103             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2104                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2105                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2106                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2107         }
2108 
2109         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2110                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2111                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112         }
2113 
2114         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2115                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2116                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2117                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2118                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2119                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2120                 }
2121         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2122                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2123                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2124                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2125                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2126                                 tg3_writephy(tp, MII_TG3_TEST1,
2127                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2128                         } else
2129                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2130 
2131                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2132                 }
2133         }
2134 
2135         /* Set the Extended packet length bit (bit 14) on all chips
2136          * that support jumbo frames.  */
2137         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2138                 /* Cannot do read-modify-write on 5401 */
2139                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2140         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2141                 /* Set bit 14 with read-modify-write to preserve other bits */
2142                 err = tg3_phy_auxctl_read(tp,
2143                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2144                 if (!err)
2145                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2146                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2147         }
2148 
2149         /* Set PHY register 0x10 bit 0 (high FIFO elasticity) to
2150          * support transmission of jumbo frames.
2151          */
2152         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2153                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2154                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2155                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2156         }
2157 
2158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2159                 /* adjust output voltage */
2160                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2161         }
2162 
2163         tg3_phy_toggle_automdix(tp, 1);
2164         tg3_phy_set_wirespeed(tp);
2165         return 0;
2166 }
2167 
2168 static void tg3_frob_aux_power(struct tg3 *tp)
2169 {
2170         bool need_vaux = false;
2171 
2172         /* The GPIOs do something completely different on 5719 and 57765. */
2173         if (!tg3_flag(tp, IS_NIC) ||
2174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2176                 return;
2177 
2178         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2179              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2181              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2182             tp->pdev_peer != tp->pdev) {
2183                 struct net_device *dev_peer;
2184 
2185                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2186 
2187                 /* remove_one() may have been run on the peer. */
2188                 if (dev_peer) {
2189                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2190 
2191                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2192                                 return;
2193 
2194                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2195                             tg3_flag(tp_peer, ENABLE_ASF))
2196                                 need_vaux = true;
2197                 }
2198         }
2199 
2200         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2201                 need_vaux = true;
2202 
2203         if (need_vaux) {
2204                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2205                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2206                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2207                                     (GRC_LCLCTRL_GPIO_OE0 |
2208                                      GRC_LCLCTRL_GPIO_OE1 |
2209                                      GRC_LCLCTRL_GPIO_OE2 |
2210                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2211                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2212                                     100);
2213                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2214                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2215                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2216                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2217                                              GRC_LCLCTRL_GPIO_OE1 |
2218                                              GRC_LCLCTRL_GPIO_OE2 |
2219                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2220                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2221                                              tp->grc_local_ctrl;
2222                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 
2224                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2225                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2226 
2227                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2228                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2229                 } else {
2230                         u32 no_gpio2;
2231                         u32 grc_local_ctrl = 0;
2232 
2233                         /* Workaround to prevent drawing too much current. */
2234                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2235                             ASIC_REV_5714) {
2236                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2237                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2238                                             grc_local_ctrl, 100);
2239                         }
2240 
2241                         /* On 5753 and variants, GPIO2 cannot be used. */
2242                         no_gpio2 = tp->nic_sram_data_cfg &
2243                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2244 
2245                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2246                                          GRC_LCLCTRL_GPIO_OE1 |
2247                                          GRC_LCLCTRL_GPIO_OE2 |
2248                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2249                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2250                         if (no_gpio2) {
2251                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2252                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2253                         }
2254                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2255                                                     grc_local_ctrl, 100);
2256 
2257                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2258 
2259                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2260                                                     grc_local_ctrl, 100);
2261 
2262                         if (!no_gpio2) {
2263                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2264                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2265                                             grc_local_ctrl, 100);
2266                         }
2267                 }
2268         } else {
2269                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2270                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2271                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2272                                     (GRC_LCLCTRL_GPIO_OE1 |
2273                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2274 
2275                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2276                                     GRC_LCLCTRL_GPIO_OE1, 100);
2277 
2278                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2279                                     (GRC_LCLCTRL_GPIO_OE1 |
2280                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2281                 }
2282         }
2283 }
2284 
2285 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2286 {
2287         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2288                 return 1;
2289         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2290                 if (speed != SPEED_10)
2291                         return 1;
2292         } else if (speed == SPEED_10)
2293                 return 1;
2294 
2295         return 0;
2296 }
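
Spelled out, the polarity rule above is: with LED mode PHY_2 the
MAC_MODE_LINK_POLARITY bit is always needed; with a BCM5411 PHY it is
needed at every speed except 10 Mbps; with any other PHY it is needed
only at 10 Mbps.
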
2297 
2298 static int tg3_setup_phy(struct tg3 *, int);
2299 
2300 #define RESET_KIND_SHUTDOWN     0
2301 #define RESET_KIND_INIT         1
2302 #define RESET_KIND_SUSPEND      2
2303 
2304 static void tg3_write_sig_post_reset(struct tg3 *, int);
2305 static int tg3_halt_cpu(struct tg3 *, u32);
2306 
2307 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2308 {
2309         u32 val;
2310 
2311         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2312                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2313                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2314                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2315 
2316                         sg_dig_ctrl |=
2317                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2318                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2319                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2320                 }
2321                 return;
2322         }
2323 
2324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2325                 tg3_bmcr_reset(tp);
2326                 val = tr32(GRC_MISC_CFG);
2327                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2328                 udelay(40);
2329                 return;
2330         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2331                 u32 phytest;
2332                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2333                         u32 phy;
2334 
2335                         tg3_writephy(tp, MII_ADVERTISE, 0);
2336                         tg3_writephy(tp, MII_BMCR,
2337                                      BMCR_ANENABLE | BMCR_ANRESTART);
2338 
2339                         tg3_writephy(tp, MII_TG3_FET_TEST,
2340                                      phytest | MII_TG3_FET_SHADOW_EN);
2341                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2342                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2343                                 tg3_writephy(tp,
2344                                              MII_TG3_FET_SHDW_AUXMODE4,
2345                                              phy);
2346                         }
2347                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2348                 }
2349                 return;
2350         } else if (do_low_power) {
2351                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2352                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2353 
2354                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2355                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2356                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2357                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2358         }
2359 
2360         /* The PHY must not be powered down on the 5700, the 5704, or
2361          * the 5780 in MII-SERDES mode, because of hardware bugs.
2362          */
2363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2366              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2367                 return;
2368 
2369         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2370             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2371                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2372                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2373                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2374                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2375         }
2376 
2377         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2378 }
2379 
2380 /* tp->lock is held. */
2381 static int tg3_nvram_lock(struct tg3 *tp)
2382 {
2383         if (tg3_flag(tp, NVRAM)) {
2384                 int i;
2385 
2386                 if (tp->nvram_lock_cnt == 0) {
2387                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2388                         for (i = 0; i < 8000; i++) {
2389                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2390                                         break;
2391                                 udelay(20);
2392                         }
2393                         if (i == 8000) {
2394                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2395                                 return -ENODEV;
2396                         }
2397                 }
2398                 tp->nvram_lock_cnt++;
2399         }
2400         return 0;
2401 }
2402 
2403 /* tp->lock is held. */
2404 static void tg3_nvram_unlock(struct tg3 *tp)
2405 {
2406         if (tg3_flag(tp, NVRAM)) {
2407                 if (tp->nvram_lock_cnt > 0)
2408                         tp->nvram_lock_cnt--;
2409                 if (tp->nvram_lock_cnt == 0)
2410                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2411         }
2412 }
2413 
2414 /* tp->lock is held. */
2415 static void tg3_enable_nvram_access(struct tg3 *tp)
2416 {
2417         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2418                 u32 nvaccess = tr32(NVRAM_ACCESS);
2419 
2420                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2421         }
2422 }
2423 
2424 /* tp->lock is held. */
2425 static void tg3_disable_nvram_access(struct tg3 *tp)
2426 {
2427         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2428                 u32 nvaccess = tr32(NVRAM_ACCESS);
2429 
2430                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2431         }
2432 }
2433 
2434 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2435                                         u32 offset, u32 *val)
2436 {
2437         u32 tmp;
2438         int i;
2439 
2440         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2441                 return -EINVAL;
2442 
2443         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2444                                         EEPROM_ADDR_DEVID_MASK |
2445                                         EEPROM_ADDR_READ);
2446         tw32(GRC_EEPROM_ADDR,
2447              tmp |
2448              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2449              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2450               EEPROM_ADDR_ADDR_MASK) |
2451              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2452 
2453         for (i = 0; i < 1000; i++) {
2454                 tmp = tr32(GRC_EEPROM_ADDR);
2455 
2456                 if (tmp & EEPROM_ADDR_COMPLETE)
2457                         break;
2458                 msleep(1);
2459         }
2460         if (!(tmp & EEPROM_ADDR_COMPLETE))
2461                 return -EBUSY;
2462 
2463         tmp = tr32(GRC_EEPROM_DATA);
2464 
2465         /*
2466          * The data always arrives in the opposite of the native
2467          * endian order, so a blind byteswap compensates.
2468          */
2469         *val = swab32(tmp);
2470 
2471         return 0;
2472 }
2473 
2474 #define NVRAM_CMD_TIMEOUT 10000
2475 
2476 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2477 {
2478         int i;
2479 
2480         tw32(NVRAM_CMD, nvram_cmd);
2481         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2482                 udelay(10);
2483                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2484                         udelay(10);
2485                         break;
2486                 }
2487         }
2488 
2489         if (i == NVRAM_CMD_TIMEOUT)
2490                 return -EBUSY;
2491 
2492         return 0;
2493 }
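
Worst case, tg3_nvram_exec_cmd() busy-waits NVRAM_CMD_TIMEOUT iterations
of udelay(10), i.e. 10000 x 10 us = 100 ms, before giving up with -EBUSY.
The extra udelay(10) after NVRAM_CMD_DONE is seen presumably lets the data
register settle before the caller reads NVRAM_RDDATA.
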
2494 
2495 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2496 {
2497         if (tg3_flag(tp, NVRAM) &&
2498             tg3_flag(tp, NVRAM_BUFFERED) &&
2499             tg3_flag(tp, FLASH) &&
2500             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2501             (tp->nvram_jedecnum == JEDEC_ATMEL))
2502 
2503                 addr = ((addr / tp->nvram_pagesize) <<
2504                         ATMEL_AT45DB0X1B_PAGE_POS) +
2505                        (addr % tp->nvram_pagesize);
2506 
2507         return addr;
2508 }
2509 
2510 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2511 {
2512         if (tg3_flag(tp, NVRAM) &&
2513             tg3_flag(tp, NVRAM_BUFFERED) &&
2514             tg3_flag(tp, FLASH) &&
2515             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2516             (tp->nvram_jedecnum == JEDEC_ATMEL))
2517 
2518                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2519                         tp->nvram_pagesize) +
2520                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2521 
2522         return addr;
2523 }
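
These two functions translate between a flat byte offset and the paged
addressing of Atmel AT45DB011-family flash, whose 264-byte pages sit on
power-of-two strides in the command address.  A standalone round-trip
check, assuming the values used for this flash family in tg3.h
(ATMEL_AT45DB0X1B_PAGE_POS of 9, i.e. 512-byte strides, and a 264-byte
nvram_pagesize):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_POS  9     /* ATMEL_AT45DB0X1B_PAGE_POS */
    #define PAGESIZE  264   /* tp->nvram_pagesize for this part */

    static unsigned int to_phys(unsigned int addr)
    {
            return ((addr / PAGESIZE) << PAGE_POS) + (addr % PAGESIZE);
    }

    static unsigned int to_logical(unsigned int addr)
    {
            return ((addr >> PAGE_POS) * PAGESIZE) +
                   (addr & ((1 << PAGE_POS) - 1));
    }

    int main(void)
    {
            unsigned int addr = 1000;       /* page 3, offset 208 */

            printf("phys(%u) = %u\n", addr, to_phys(addr));  /* 1744 */
            assert(to_logical(to_phys(addr)) == addr);
            return 0;
    }
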
2524 
2525 /* NOTE: Data read in from NVRAM is byteswapped according to
2526  * the byteswapping settings for all other register accesses.
2527  * tg3 devices are BE devices, so on a BE machine, the data
2528  * returned will be exactly as it is seen in NVRAM.  On a LE
2529  * machine, the 32-bit value will be byteswapped.
2530  */
2531 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2532 {
2533         int ret;
2534 
2535         if (!tg3_flag(tp, NVRAM))
2536                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2537 
2538         offset = tg3_nvram_phys_addr(tp, offset);
2539 
2540         if (offset > NVRAM_ADDR_MSK)
2541                 return -EINVAL;
2542 
2543         ret = tg3_nvram_lock(tp);
2544         if (ret)
2545                 return ret;
2546 
2547         tg3_enable_nvram_access(tp);
2548 
2549         tw32(NVRAM_ADDR, offset);
2550         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2551                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2552 
2553         if (ret == 0)
2554                 *val = tr32(NVRAM_RDDATA);
2555 
2556         tg3_disable_nvram_access(tp);
2557 
2558         tg3_nvram_unlock(tp);
2559 
2560         return ret;
2561 }
2562 
2563 /* Ensures NVRAM data is in bytestream format. */
2564 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2565 {
2566         u32 v;
2567         int res = tg3_nvram_read(tp, offset, &v);
2568         if (!res)
2569                 *val = cpu_to_be32(v);
2570         return res;
2571 }
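
tg3_nvram_read() hands back the word as a native-endian number, so its
in-memory byte order differs between hosts; tg3_nvram_read_be32() fixes
the byte order instead, which is what callers that memcpy() the result
into byte arrays (MAC addresses, VPD strings) want.  A user-space
illustration, using htonl() as the analogue of cpu_to_be32():

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Suppose the NVRAM bytes at some offset are 00 10 18 a0.
             * tg3_nvram_read() then yields the number 0x001018a0 on any
             * host, but that number's memory layout is host-dependent. */
            unsigned int v = 0x001018a0;
            unsigned int be = htonl(v);     /* analogue of cpu_to_be32() */
            unsigned char bytes[4];

            memcpy(bytes, &be, sizeof(bytes));
            printf("%02x %02x %02x %02x\n",
                   bytes[0], bytes[1], bytes[2], bytes[3]);
            /* Prints "00 10 18 a0" on little- and big-endian hosts alike. */
            return 0;
    }
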
2572 
2573 /* tp->lock is held. */
2574 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2575 {
2576         u32 addr_high, addr_low;
2577         int i;
2578 
2579         addr_high = ((tp->dev->dev_addr[0] << 8) |
2580                      tp->dev->dev_addr[1]);
2581         addr_low = ((tp->dev->dev_addr[2] << 24) |
2582                     (tp->dev->dev_addr[3] << 16) |
2583                     (tp->dev->dev_addr[4] <<  8) |
2584                     (tp->dev->dev_addr[5] <<  0));
2585         for (i = 0; i < 4; i++) {
2586                 if (i == 1 && skip_mac_1)
2587                         continue;
2588                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2589                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2590         }
2591 
2592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2594                 for (i = 0; i < 12; i++) {
2595                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2596                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2597                 }
2598         }
2599 
2600         addr_high = (tp->dev->dev_addr[0] +
2601                      tp->dev->dev_addr[1] +
2602                      tp->dev->dev_addr[2] +
2603                      tp->dev->dev_addr[3] +
2604                      tp->dev->dev_addr[4] +
2605                      tp->dev->dev_addr[5]) &
2606                 TX_BACKOFF_SEED_MASK;
2607         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2608 }
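
Concretely, for the station address 00:10:18:aa:bb:cc the packing above
yields:

    addr_high = (0x00 << 8) | 0x10                               = 0x0010
    addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc = 0x18aabbcc

and the backoff seed is the octet sum (0x00 + 0x10 + 0x18 + 0xaa + 0xbb +
0xcc = 0x259) masked with TX_BACKOFF_SEED_MASK, so that adapters with
different MAC addresses seed their transmit backoff differently.
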
2609 
2610 static void tg3_enable_register_access(struct tg3 *tp)
2611 {
2612         /*
2613          * Make sure register accesses (indirect or otherwise) will function
2614          * correctly.
2615          */
2616         pci_write_config_dword(tp->pdev,
2617                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2618 }
2619 
2620 static int tg3_power_up(struct tg3 *tp)
2621 {
2622         tg3_enable_register_access(tp);
2623 
2624         pci_set_power_state(tp->pdev, PCI_D0);
2625 
2626         /* Switch out of Vaux if it is a NIC */
2627         if (tg3_flag(tp, IS_NIC))
2628                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2629 
2630         return 0;
2631 }
2632 
2633 static int tg3_power_down_prepare(struct tg3 *tp)
2634 {
2635         u32 misc_host_ctrl;
2636         bool device_should_wake, do_low_power;
2637 
2638         tg3_enable_register_access(tp);
2639 
2640         /* Restore the CLKREQ setting. */
2641         if (tg3_flag(tp, CLKREQ_BUG)) {
2642                 u16 lnkctl;
2643 
2644                 pci_read_config_word(tp->pdev,
2645                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2646                                      &lnkctl);
2647                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2648                 pci_write_config_word(tp->pdev,
2649                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2650                                       lnkctl);
2651         }
2652 
2653         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2654         tw32(TG3PCI_MISC_HOST_CTRL,
2655              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2656 
2657         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2658                              tg3_flag(tp, WOL_ENABLE);
2659 
2660         if (tg3_flag(tp, USE_PHYLIB)) {
2661                 do_low_power = false;
2662                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2663                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2664                         struct phy_device *phydev;
2665                         u32 phyid, advertising;
2666 
2667                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2668 
2669                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2670 
2671                         tp->link_config.orig_speed = phydev->speed;
2672                         tp->link_config.orig_duplex = phydev->duplex;
2673                         tp->link_config.orig_autoneg = phydev->autoneg;
2674                         tp->link_config.orig_advertising = phydev->advertising;
2675 
2676                         advertising = ADVERTISED_TP |
2677                                       ADVERTISED_Pause |
2678                                       ADVERTISED_Autoneg |
2679                                       ADVERTISED_10baseT_Half;
2680 
2681                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2682                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2683                                         advertising |=
2684                                                 ADVERTISED_100baseT_Half |
2685                                                 ADVERTISED_100baseT_Full |
2686                                                 ADVERTISED_10baseT_Full;
2687                                 else
2688                                         advertising |= ADVERTISED_10baseT_Full;
2689                         }
2690 
2691                         phydev->advertising = advertising;
2692 
2693                         phy_start_aneg(phydev);
2694 
2695                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2696                         if (phyid != PHY_ID_BCMAC131) {
2697                                 phyid &= PHY_BCM_OUI_MASK;
2698                                 if (phyid == PHY_BCM_OUI_1 ||
2699                                     phyid == PHY_BCM_OUI_2 ||
2700                                     phyid == PHY_BCM_OUI_3)
2701                                         do_low_power = true;
2702                         }
2703                 }
2704         } else {
2705                 do_low_power = true;
2706 
2707                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2708                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2709                         tp->link_config.orig_speed = tp->link_config.speed;
2710                         tp->link_config.orig_duplex = tp->link_config.duplex;
2711                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2712                 }
2713 
2714                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2715                         tp->link_config.speed = SPEED_10;
2716                         tp->link_config.duplex = DUPLEX_HALF;
2717                         tp->link_config.autoneg = AUTONEG_ENABLE;
2718                         tg3_setup_phy(tp, 0);
2719                 }
2720         }
2721 
2722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2723                 u32 val;
2724 
2725                 val = tr32(GRC_VCPU_EXT_CTRL);
2726                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2727         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2728                 int i;
2729                 u32 val;
2730 
2731                 for (i = 0; i < 200; i++) {
2732                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2733                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2734                                 break;
2735                         msleep(1);
2736                 }
2737         }
2738         if (tg3_flag(tp, WOL_CAP))
2739                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2740                                                      WOL_DRV_STATE_SHUTDOWN |
2741                                                      WOL_DRV_WOL |
2742                                                      WOL_SET_MAGIC_PKT);
2743 
2744         if (device_should_wake) {
2745                 u32 mac_mode;
2746 
2747                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2748                         if (do_low_power &&
2749                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2750                                 tg3_phy_auxctl_write(tp,
2751                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2752                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2753                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2754                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2755                                 udelay(40);
2756                         }
2757 
2758                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2759                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2760                         else
2761                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2762 
2763                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2764                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2765                             ASIC_REV_5700) {
2766                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2767                                              SPEED_100 : SPEED_10;
2768                                 if (tg3_5700_link_polarity(tp, speed))
2769                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2770                                 else
2771                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2772                         }
2773                 } else {
2774                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2775                 }
2776 
2777                 if (!tg3_flag(tp, 5750_PLUS))
2778                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2779 
2780                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2781                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2782                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2783                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2784 
2785                 if (tg3_flag(tp, ENABLE_APE))
2786                         mac_mode |= MAC_MODE_APE_TX_EN |
2787                                     MAC_MODE_APE_RX_EN |
2788                                     MAC_MODE_TDE_ENABLE;
2789 
2790                 tw32_f(MAC_MODE, mac_mode);
2791                 udelay(100);
2792 
2793                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2794                 udelay(10);
2795         }
2796 
2797         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2798             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2799              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2800                 u32 base_val;
2801 
2802                 base_val = tp->pci_clock_ctrl;
2803                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2804                              CLOCK_CTRL_TXCLK_DISABLE);
2805 
2806                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2807                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2808         } else if (tg3_flag(tp, 5780_CLASS) ||
2809                    tg3_flag(tp, CPMU_PRESENT) ||
2810                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2811                 /* do nothing */
2812         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2813                 u32 newbits1, newbits2;
2814 
2815                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2816                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2817                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2818                                     CLOCK_CTRL_TXCLK_DISABLE |
2819                                     CLOCK_CTRL_ALTCLK);
2820                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2821                 } else if (tg3_flag(tp, 5705_PLUS)) {
2822                         newbits1 = CLOCK_CTRL_625_CORE;
2823                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2824                 } else {
2825                         newbits1 = CLOCK_CTRL_ALTCLK;
2826                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2827                 }
2828 
2829                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2830                             40);
2831 
2832                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2833                             40);
2834 
2835                 if (!tg3_flag(tp, 5705_PLUS)) {
2836                         u32 newbits3;
2837 
2838                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2839                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2840                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2841                                             CLOCK_CTRL_TXCLK_DISABLE |
2842                                             CLOCK_CTRL_44MHZ_CORE);
2843                         } else {
2844                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2845                         }
2846 
2847                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2848                                     tp->pci_clock_ctrl | newbits3, 40);
2849                 }
2850         }
2851 
2852         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2853                 tg3_power_down_phy(tp, do_low_power);
2854 
2855         tg3_frob_aux_power(tp);
2856 
2857         /* Workaround for unstable PLL clock on 5750 AX/BX revs */
2858         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2859             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2860                 u32 val = tr32(0x7d00);
2861 
2862                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2863                 tw32(0x7d00, val);
2864                 if (!tg3_flag(tp, ENABLE_ASF)) {
2865                         int err;
2866 
2867                         err = tg3_nvram_lock(tp);
2868                         tg3_halt_cpu(tp, RX_CPU_BASE);
2869                         if (!err)
2870                                 tg3_nvram_unlock(tp);
2871                 }
2872         }
2873 
2874         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2875 
2876         return 0;
2877 }
2878 
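     /* Final power down: run the prepare step above, then arm PCI
      * wakeup (if WOL is enabled) and drop the device into D3hot.
      */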
2879 static void tg3_power_down(struct tg3 *tp)
2880 {
2881         tg3_power_down_prepare(tp);
2882 
2883         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2884         pci_set_power_state(tp->pdev, PCI_D3hot);
2885 }
2886 
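     /* Decode the PHY AUX status register into a (speed, duplex) pair.
      * FET-style PHYs encode the result differently, hence the extra
      * decoding in the default branch.
      */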
2887 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2888 {
2889         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2890         case MII_TG3_AUX_STAT_10HALF:
2891                 *speed = SPEED_10;
2892                 *duplex = DUPLEX_HALF;
2893                 break;
2894 
2895         case MII_TG3_AUX_STAT_10FULL:
2896                 *speed = SPEED_10;
2897                 *duplex = DUPLEX_FULL;
2898                 break;
2899 
2900         case MII_TG3_AUX_STAT_100HALF:
2901                 *speed = SPEED_100;
2902                 *duplex = DUPLEX_HALF;
2903                 break;
2904 
2905         case MII_TG3_AUX_STAT_100FULL:
2906                 *speed = SPEED_100;
2907                 *duplex = DUPLEX_FULL;
2908                 break;
2909 
2910         case MII_TG3_AUX_STAT_1000HALF:
2911                 *speed = SPEED_1000;
2912                 *duplex = DUPLEX_HALF;
2913                 break;
2914 
2915         case MII_TG3_AUX_STAT_1000FULL:
2916                 *speed = SPEED_1000;
2917                 *duplex = DUPLEX_FULL;
2918                 break;
2919 
2920         default:
2921                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2922                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2923                                  SPEED_10;
2924                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2925                                   DUPLEX_HALF;
2926                         break;
2927                 }
2928                 *speed = SPEED_INVALID;
2929                 *duplex = DUPLEX_INVALID;
2930                 break;
2931         }
2932 }
2933 
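     /* Program the 10/100 advertisement register, then (unless the PHY
      * is 10/100-only) the 1000BASE-T control register, and finally the
      * EEE advertisement for EEE-capable PHYs.
      */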
2934 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2935 {
2936         int err = 0;
2937         u32 val, new_adv;
2938 
2939         new_adv = ADVERTISE_CSMA;
2940         if (advertise & ADVERTISED_10baseT_Half)
2941                 new_adv |= ADVERTISE_10HALF;
2942         if (advertise & ADVERTISED_10baseT_Full)
2943                 new_adv |= ADVERTISE_10FULL;
2944         if (advertise & ADVERTISED_100baseT_Half)
2945                 new_adv |= ADVERTISE_100HALF;
2946         if (advertise & ADVERTISED_100baseT_Full)
2947                 new_adv |= ADVERTISE_100FULL;
2948 
2949         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2950 
2951         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2952         if (err)
2953                 goto done;
2954 
2955         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2956                 goto done;
2957 
2958         new_adv = 0;
2959         if (advertise & ADVERTISED_1000baseT_Half)
2960                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2961         if (advertise & ADVERTISED_1000baseT_Full)
2962                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2963 
2964         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2965             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2966                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2967                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2968 
2969         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2970         if (err)
2971                 goto done;
2972 
2973         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2974                 goto done;
2975 
2976         tw32(TG3_CPMU_EEE_MODE,
2977              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2978 
2979         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2980         if (!err) {
2981                 u32 err2;
2982 
2983                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2984                 case ASIC_REV_5717:
2985                 case ASIC_REV_57765:
2986                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2987                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2988                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2989                         /* Fall through */
2990                 case ASIC_REV_5719:
2991                         val = MII_TG3_DSP_TAP26_ALNOKO |
2992                               MII_TG3_DSP_TAP26_RMRXSTO |
2993                               MII_TG3_DSP_TAP26_OPCSINPT;
2994                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2995                 }
2996 
2997                 val = 0;
2998                 /* Advertise 100-BaseTX EEE ability */
2999                 if (advertise & ADVERTISED_100baseT_Full)
3000                         val |= MDIO_AN_EEE_ADV_100TX;
3001                 /* Advertise 1000-BaseT EEE ability */
3002                 if (advertise & ADVERTISED_1000baseT_Full)
3003                         val |= MDIO_AN_EEE_ADV_1000T;
3004                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3005 
3006                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3007                 if (!err)
3008                         err = err2;
3009         }
3010 
3011 done:
3012         return err;
3013 }
3014 
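     /* Start link negotiation on a copper PHY: advertise a reduced set
      * of modes in low-power (WOL) state, the full configured set when
      * no speed is forced, or program BMCR directly for a forced mode.
      */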
3015 static void tg3_phy_copper_begin(struct tg3 *tp)
3016 {
3017         u32 new_adv;
3018         int i;
3019 
3020         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3021                 new_adv = ADVERTISED_10baseT_Half |
3022                           ADVERTISED_10baseT_Full;
3023                 if (tg3_flag(tp, WOL_SPEED_100MB))
3024                         new_adv |= ADVERTISED_100baseT_Half |
3025                                    ADVERTISED_100baseT_Full;
3026 
3027                 tg3_phy_autoneg_cfg(tp, new_adv,
3028                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3029         } else if (tp->link_config.speed == SPEED_INVALID) {
3030                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3031                         tp->link_config.advertising &=
3032                                 ~(ADVERTISED_1000baseT_Half |
3033                                   ADVERTISED_1000baseT_Full);
3034 
3035                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3036                                     tp->link_config.flowctrl);
3037         } else {
3038                 /* Asking for a specific link mode. */
3039                 if (tp->link_config.speed == SPEED_1000) {
3040                         if (tp->link_config.duplex == DUPLEX_FULL)
3041                                 new_adv = ADVERTISED_1000baseT_Full;
3042                         else
3043                                 new_adv = ADVERTISED_1000baseT_Half;
3044                 } else if (tp->link_config.speed == SPEED_100) {
3045                         if (tp->link_config.duplex == DUPLEX_FULL)
3046                                 new_adv = ADVERTISED_100baseT_Full;
3047                         else
3048                                 new_adv = ADVERTISED_100baseT_Half;
3049                 } else {
3050                         if (tp->link_config.duplex == DUPLEX_FULL)
3051                                 new_adv = ADVERTISED_10baseT_Full;
3052                         else
3053                                 new_adv = ADVERTISED_10baseT_Half;
3054                 }
3055 
3056                 tg3_phy_autoneg_cfg(tp, new_adv,
3057                                     tp->link_config.flowctrl);
3058         }
3059 
3060         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3061             tp->link_config.speed != SPEED_INVALID) {
3062                 u32 bmcr, orig_bmcr;
3063 
3064                 tp->link_config.active_speed = tp->link_config.speed;
3065                 tp->link_config.active_duplex = tp->link_config.duplex;
3066 
3067                 bmcr = 0;
3068                 switch (tp->link_config.speed) {
3069                 default:
3070                 case SPEED_10:
3071                         break;
3072 
3073                 case SPEED_100:
3074                         bmcr |= BMCR_SPEED100;
3075                         break;
3076 
3077                 case SPEED_1000:
3078                         bmcr |= TG3_BMCR_SPEED1000;
3079                         break;
3080                 }
3081 
3082                 if (tp->link_config.duplex == DUPLEX_FULL)
3083                         bmcr |= BMCR_FULLDPLX;
3084 
3085                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3086                     (bmcr != orig_bmcr)) {
3087                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3088                         for (i = 0; i < 1500; i++) {
3089                                 u32 tmp;
3090 
3091                                 udelay(10);
3092                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3093                                     tg3_readphy(tp, MII_BMSR, &tmp))
3094                                         continue;
3095                                 if (!(tmp & BMSR_LSTATUS)) {
3096                                         udelay(40);
3097                                         break;
3098                                 }
3099                         }
3100                         tg3_writephy(tp, MII_BMCR, bmcr);
3101                         udelay(40);
3102                 }
3103         } else {
3104                 tg3_writephy(tp, MII_BMCR,
3105                              BMCR_ANENABLE | BMCR_ANRESTART);
3106         }
3107 }
3108 
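     /* Magic DSP writes for the BCM5401 PHY; the values are presumably
      * vendor-supplied and are not documented here.
      */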
3109 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3110 {
3111         int err;
3112 
3113         /* Turn off tap power management and set the extended
3114          * packet length bit. */
3115         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3116 
3117         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3118         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3119         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3121         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3122 
3123         udelay(40);
3124 
3125         return err;
3126 }
3127 
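     /* Return 1 if the advertisement registers already cover every mode
      * in @mask, 0 otherwise (including on any PHY read failure).
      */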
3128 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3129 {
3130         u32 adv_reg, all_mask = 0;
3131 
3132         if (mask & ADVERTISED_10baseT_Half)
3133                 all_mask |= ADVERTISE_10HALF;
3134         if (mask & ADVERTISED_10baseT_Full)
3135                 all_mask |= ADVERTISE_10FULL;
3136         if (mask & ADVERTISED_100baseT_Half)
3137                 all_mask |= ADVERTISE_100HALF;
3138         if (mask & ADVERTISED_100baseT_Full)
3139                 all_mask |= ADVERTISE_100FULL;
3140 
3141         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3142                 return 0;
3143 
3144         if ((adv_reg & all_mask) != all_mask)
3145                 return 0;
3146         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3147                 u32 tg3_ctrl;
3148 
3149                 all_mask = 0;
3150                 if (mask & ADVERTISED_1000baseT_Half)
3151                         all_mask |= ADVERTISE_1000HALF;
3152                 if (mask & ADVERTISED_1000baseT_Full)
3153                         all_mask |= ADVERTISE_1000FULL;
3154 
3155                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3156                         return 0;
3157 
3158                 if ((tg3_ctrl & all_mask) != all_mask)
3159                         return 0;
3160         }
3161         return 1;
3162 }
3163 
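     /* Verify that the advertised flow control matches what was
      * requested.  On a half-duplex link, rewrite the advertisement
      * instead so a later renegotiation gets the pause bits right.
      */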
3164 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3165 {
3166         u32 curadv, reqadv;
3167 
3168         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3169                 return 1;
3170 
3171         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3172         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3173 
3174         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3175                 if (curadv != reqadv)
3176                         return 0;
3177 
3178                 if (tg3_flag(tp, PAUSE_AUTONEG))
3179                         tg3_readphy(tp, MII_LPA, rmtadv);
3180         } else {
3181                 /* Reprogram the advertisement register, even if it
3182                  * does not affect the current link.  If the link
3183                  * gets renegotiated in the future, we can save an
3184                  * additional renegotiation cycle by advertising
3185                  * it correctly in the first place.
3186                  */
3187                 if (curadv != reqadv) {
3188                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3189                                      ADVERTISE_PAUSE_ASYM);
3190                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3191                 }
3192         }
3193 
3194         return 1;
3195 }
3196 
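     /* Bring up, or re-validate, the link on a copper PHY: clear stale
      * MAC events, apply chip-specific PHY workarounds, poll BMSR and
      * the AUX status register for the negotiated speed/duplex, then
      * program MAC_MODE to match and report any carrier change.
      */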
3197 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3198 {
3199         int current_link_up;
3200         u32 bmsr, val;
3201         u32 lcl_adv, rmt_adv;
3202         u16 current_speed;
3203         u8 current_duplex;
3204         int i, err;
3205 
3206         tw32(MAC_EVENT, 0);
3207 
3208         tw32_f(MAC_STATUS,
3209              (MAC_STATUS_SYNC_CHANGED |
3210               MAC_STATUS_CFG_CHANGED |
3211               MAC_STATUS_MI_COMPLETION |
3212               MAC_STATUS_LNKSTATE_CHANGED));
3213         udelay(40);
3214 
3215         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3216                 tw32_f(MAC_MI_MODE,
3217                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3218                 udelay(80);
3219         }
3220 
3221         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3222 
3223         /* Some third-party PHYs need to be reset on link going
3224          * down.
3225          */
3226         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3227              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3229             netif_carrier_ok(tp->dev)) {
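                     /* BMSR latches link-down events; read it twice so
                      * the second read returns the current link state.
                      */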
3230                 tg3_readphy(tp, MII_BMSR, &bmsr);
3231                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3232                     !(bmsr & BMSR_LSTATUS))
3233                         force_reset = 1;
3234         }
3235         if (force_reset)
3236                 tg3_phy_reset(tp);
3237 
3238         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3239                 tg3_readphy(tp, MII_BMSR, &bmsr);
3240                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3241                     !tg3_flag(tp, INIT_COMPLETE))
3242                         bmsr = 0;
3243 
3244                 if (!(bmsr & BMSR_LSTATUS)) {
3245                         err = tg3_init_5401phy_dsp(tp);
3246                         if (err)
3247                                 return err;
3248 
3249                         tg3_readphy(tp, MII_BMSR, &bmsr);
3250                         for (i = 0; i < 1000; i++) {
3251                                 udelay(10);
3252                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3253                                     (bmsr & BMSR_LSTATUS)) {
3254                                         udelay(40);
3255                                         break;
3256                                 }
3257                         }
3258 
3259                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3260                             TG3_PHY_REV_BCM5401_B0 &&
3261                             !(bmsr & BMSR_LSTATUS) &&
3262                             tp->link_config.active_speed == SPEED_1000) {
3263                                 err = tg3_phy_reset(tp);
3264                                 if (!err)
3265                                         err = tg3_init_5401phy_dsp(tp);
3266                                 if (err)
3267                                         return err;
3268                         }
3269                 }
3270         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3271                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3272                 /* 5701 {A0,B0} CRC bug workaround */
3273                 tg3_writephy(tp, 0x15, 0x0a75);
3274                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3277         }
3278 
3279         /* Clear pending interrupts... */
3280         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 
3283         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3284                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3285         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3286                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3287 
3288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3290                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3291                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3292                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3293                 else
3294                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3295         }
3296 
3297         current_link_up = 0;
3298         current_speed = SPEED_INVALID;
3299         current_duplex = DUPLEX_INVALID;
3300 
3301         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3302                 err = tg3_phy_auxctl_read(tp,
3303                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3304                                           &val);
3305                 if (!err && !(val & (1 << 10))) {
3306                         tg3_phy_auxctl_write(tp,
3307                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3308                                              val | (1 << 10));
3309                         goto relink;
3310                 }
3311         }
3312 
3313         bmsr = 0;
3314         for (i = 0; i < 100; i++) {
3315                 tg3_readphy(tp, MII_BMSR, &bmsr);
3316                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3317                     (bmsr & BMSR_LSTATUS))
3318                         break;
3319                 udelay(40);
3320         }
3321 
3322         if (bmsr & BMSR_LSTATUS) {
3323                 u32 aux_stat, bmcr;
3324 
3325                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3326                 for (i = 0; i < 2000; i++) {
3327                         udelay(10);
3328                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3329                             aux_stat)
3330                                 break;
3331                 }
3332 
3333                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3334                                              &current_speed,
3335                                              &current_duplex);
3336 
3337                 bmcr = 0;
3338                 for (i = 0; i < 200; i++) {
3339                         tg3_readphy(tp, MII_BMCR, &bmcr);
3340                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3341                                 continue;
3342                         if (bmcr && bmcr != 0x7fff)
3343                                 break;
3344                         udelay(10);
3345                 }
3346 
3347                 lcl_adv = 0;
3348                 rmt_adv = 0;
3349 
3350                 tp->link_config.active_speed = current_speed;
3351                 tp->link_config.active_duplex = current_duplex;
3352 
3353                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3354                         if ((bmcr & BMCR_ANENABLE) &&
3355                             tg3_copper_is_advertising_all(tp,
3356                                                 tp->link_config.advertising)) {
3357                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3358                                                                   &rmt_adv))
3359                                         current_link_up = 1;
3360                         }
3361                 } else {
3362                         if (!(bmcr & BMCR_ANENABLE) &&
3363                             tp->link_config.speed == current_speed &&
3364                             tp->link_config.duplex == current_duplex &&
3365                             tp->link_config.flowctrl ==
3366                             tp->link_config.active_flowctrl) {
3367                                 current_link_up = 1;
3368                         }
3369                 }
3370 
3371                 if (current_link_up == 1 &&
3372                     tp->link_config.active_duplex == DUPLEX_FULL)
3373                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3374         }
3375 
3376 relink:
3377         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3378                 tg3_phy_copper_begin(tp);
3379 
3380                 tg3_readphy(tp, MII_BMSR, &bmsr);
3381                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3382                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3383                         current_link_up = 1;
3384         }
3385 
3386         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3387         if (current_link_up == 1) {
3388                 if (tp->link_config.active_speed == SPEED_100 ||
3389                     tp->link_config.active_speed == SPEED_10)
3390                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3391                 else
3392                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3394                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3395         else
3396                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3397 
3398         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3399         if (tp->link_config.active_duplex == DUPLEX_HALF)
3400                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3401 
3402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3403                 if (current_link_up == 1 &&
3404                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3405                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3406                 else
3407                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3408         }
3409 
3410         /* Without this setting the Netgear GA302T PHY does not
3411          * send or receive packets; the reason is not understood.
3412          */
3413         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3414             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3415                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3416                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3417                 udelay(80);
3418         }
3419 
3420         tw32_f(MAC_MODE, tp->mac_mode);
3421         udelay(40);
3422 
3423         tg3_phy_eee_adjust(tp, current_link_up);
3424 
3425         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3426                 /* Polled via timer. */
3427                 tw32_f(MAC_EVENT, 0);
3428         } else {
3429                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3430         }
3431         udelay(40);
3432 
3433         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3434             current_link_up == 1 &&
3435             tp->link_config.active_speed == SPEED_1000 &&
3436             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3437                 udelay(120);
3438                 tw32_f(MAC_STATUS,
3439                      (MAC_STATUS_SYNC_CHANGED |
3440                       MAC_STATUS_CFG_CHANGED));
3441                 udelay(40);
3442                 tg3_write_mem(tp,
3443                               NIC_SRAM_FIRMWARE_MBOX,
3444                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3445         }
3446 
3447         /* Prevent send BD corruption: keep CLKREQ disabled at 10/100. */
3448         if (tg3_flag(tp, CLKREQ_BUG)) {
3449                 u16 oldlnkctl, newlnkctl;
3450 
3451                 pci_read_config_word(tp->pdev,
3452                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3453                                      &oldlnkctl);
3454                 if (tp->link_config.active_speed == SPEED_100 ||
3455                     tp->link_config.active_speed == SPEED_10)
3456                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3457                 else
3458                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3459                 if (newlnkctl != oldlnkctl)
3460                         pci_write_config_word(tp->pdev,
3461                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3462                                               newlnkctl);
3463         }
3464 
3465         if (current_link_up != netif_carrier_ok(tp->dev)) {
3466                 if (current_link_up)
3467                         netif_carrier_on(tp->dev);
3468                 else
3469                         netif_carrier_off(tp->dev);
3470                 tg3_link_report(tp);
3471         }
3472 
3473         return 0;
3474 }
3475 
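     /* Software state machine for 1000BASE-X (clause 37) autonegotiation,
      * used on fiber ports in place of the hardware autoneg engine.
      */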
3476 struct tg3_fiber_aneginfo {
3477         int state;
3478 #define ANEG_STATE_UNKNOWN              0
3479 #define ANEG_STATE_AN_ENABLE            1
3480 #define ANEG_STATE_RESTART_INIT         2
3481 #define ANEG_STATE_RESTART              3
3482 #define ANEG_STATE_DISABLE_LINK_OK      4
3483 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3484 #define ANEG_STATE_ABILITY_DETECT       6
3485 #define ANEG_STATE_ACK_DETECT_INIT      7
3486 #define ANEG_STATE_ACK_DETECT           8
3487 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3488 #define ANEG_STATE_COMPLETE_ACK         10
3489 #define ANEG_STATE_IDLE_DETECT_INIT     11
3490 #define ANEG_STATE_IDLE_DETECT          12
3491 #define ANEG_STATE_LINK_OK              13
3492 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3493 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3494 
3495         u32 flags;
3496 #define MR_AN_ENABLE            0x00000001
3497 #define MR_RESTART_AN           0x00000002
3498 #define MR_AN_COMPLETE          0x00000004
3499 #define MR_PAGE_RX              0x00000008
3500 #define MR_NP_LOADED            0x00000010
3501 #define MR_TOGGLE_TX            0x00000020
3502 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3503 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3504 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3505 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3506 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3507 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3508 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3509 #define MR_TOGGLE_RX            0x00002000
3510 #define MR_NP_RX                0x00004000
3511 
3512 #define MR_LINK_OK              0x80000000
3513 
3514         unsigned long link_time, cur_time;
3515 
3516         u32 ability_match_cfg;
3517         int ability_match_count;
3518 
3519         char ability_match, idle_match, ack_match;
3520 
3521         u32 txconfig, rxconfig;
3522 #define ANEG_CFG_NP             0x00000080
3523 #define ANEG_CFG_ACK            0x00000040
3524 #define ANEG_CFG_RF2            0x00000020
3525 #define ANEG_CFG_RF1            0x00000010
3526 #define ANEG_CFG_PS2            0x00000001
3527 #define ANEG_CFG_PS1            0x00008000
3528 #define ANEG_CFG_HD             0x00004000
3529 #define ANEG_CFG_FD             0x00002000
3530 #define ANEG_CFG_INVAL          0x00001f06
3531 
3532 };
3533 #define ANEG_OK         0
3534 #define ANEG_DONE       1
3535 #define ANEG_TIMER_ENAB 2
3536 #define ANEG_FAILED     -1
3537 
3538 #define ANEG_STATE_SETTLE_TIME  10000
3539 
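     /* Advance the fiber autoneg state machine by one tick.  Returns
      * ANEG_OK or ANEG_TIMER_ENAB while negotiation is in progress,
      * ANEG_DONE on completion, or ANEG_FAILED.
      */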
3540 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3541                                    struct tg3_fiber_aneginfo *ap)
3542 {
3543         u16 flowctrl;
3544         unsigned long delta;
3545         u32 rx_cfg_reg;
3546         int ret;
3547 
3548         if (ap->state == ANEG_STATE_UNKNOWN) {
3549                 ap->rxconfig = 0;
3550                 ap->link_time = 0;
3551                 ap->cur_time = 0;
3552                 ap->ability_match_cfg = 0;
3553                 ap->ability_match_count = 0;
3554                 ap->ability_match = 0;
3555                 ap->idle_match = 0;
3556                 ap->ack_match = 0;
3557         }
3558         ap->cur_time++;
3559 
3560         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3561                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3562 
3563                 if (rx_cfg_reg != ap->ability_match_cfg) {
3564                         ap->ability_match_cfg = rx_cfg_reg;
3565                         ap->ability_match = 0;
3566                         ap->ability_match_count = 0;
3567                 } else {
3568                         if (++ap->ability_match_count > 1) {
3569                                 ap->ability_match = 1;
3570                                 ap->ability_match_cfg = rx_cfg_reg;
3571                         }
3572                 }
3573                 if (rx_cfg_reg & ANEG_CFG_ACK)
3574                         ap->ack_match = 1;
3575                 else
3576                         ap->ack_match = 0;
3577 
3578                 ap->idle_match = 0;
3579         } else {
3580                 ap->idle_match = 1;
3581                 ap->ability_match_cfg = 0;
3582                 ap->ability_match_count = 0;
3583                 ap->ability_match = 0;
3584                 ap->ack_match = 0;
3585 
3586                 rx_cfg_reg = 0;
3587         }
3588 
3589         ap->rxconfig = rx_cfg_reg;
3590         ret = ANEG_OK;
3591 
3592         switch (ap->state) {
3593         case ANEG_STATE_UNKNOWN:
3594                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3595                         ap->state = ANEG_STATE_AN_ENABLE;
3596 
3597                 /* fallthru */
3598         case ANEG_STATE_AN_ENABLE:
3599                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3600                 if (ap->flags & MR_AN_ENABLE) {
3601                         ap->link_time = 0;
3602                         ap->cur_time = 0;
3603                         ap->ability_match_cfg = 0;
3604                         ap->ability_match_count = 0;
3605                         ap->ability_match = 0;
3606                         ap->idle_match = 0;
3607                         ap->ack_match = 0;
3608 
3609                         ap->state = ANEG_STATE_RESTART_INIT;
3610                 } else {
3611                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3612                 }
3613                 break;
3614 
3615         case ANEG_STATE_RESTART_INIT:
3616                 ap->link_time = ap->cur_time;
3617                 ap->flags &= ~(MR_NP_LOADED);
3618                 ap->txconfig = 0;
3619                 tw32(MAC_TX_AUTO_NEG, 0);
3620                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3621                 tw32_f(MAC_MODE, tp->mac_mode);
3622                 udelay(40);
3623 
3624                 ret = ANEG_TIMER_ENAB;
3625                 ap->state = ANEG_STATE_RESTART;
3626 
3627                 /* fallthru */
3628         case ANEG_STATE_RESTART:
3629                 delta = ap->cur_time - ap->link_time;
3630                 if (delta > ANEG_STATE_SETTLE_TIME)
3631                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3632                 else
3633                         ret = ANEG_TIMER_ENAB;
3634                 break;
3635 
3636         case ANEG_STATE_DISABLE_LINK_OK:
3637                 ret = ANEG_DONE;
3638                 break;
3639 
3640         case ANEG_STATE_ABILITY_DETECT_INIT:
3641                 ap->flags &= ~(MR_TOGGLE_TX);
3642                 ap->txconfig = ANEG_CFG_FD;
3643                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3644                 if (flowctrl & ADVERTISE_1000XPAUSE)
3645                         ap->txconfig |= ANEG_CFG_PS1;
3646                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3647                         ap->txconfig |= ANEG_CFG_PS2;
3648                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3649                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3650                 tw32_f(MAC_MODE, tp->mac_mode);
3651                 udelay(40);
3652 
3653                 ap->state = ANEG_STATE_ABILITY_DETECT;
3654                 break;
3655 
3656         case ANEG_STATE_ABILITY_DETECT:
3657                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3658                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3659                 break;
3660 
3661         case ANEG_STATE_ACK_DETECT_INIT:
3662                 ap->txconfig |= ANEG_CFG_ACK;
3663                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3664                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3665                 tw32_f(MAC_MODE, tp->mac_mode);
3666                 udelay(40);
3667 
3668                 ap->state = ANEG_STATE_ACK_DETECT;
3669 
3670                 /* fallthru */
3671         case ANEG_STATE_ACK_DETECT:
3672                 if (ap->ack_match != 0) {
3673                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3674                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3675                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3676                         } else {
3677                                 ap->state = ANEG_STATE_AN_ENABLE;
3678                         }
3679                 } else if (ap->ability_match != 0 &&
3680                            ap->rxconfig == 0) {
3681                         ap->state = ANEG_STATE_AN_ENABLE;
3682                 }
3683                 break;
3684 
3685         case ANEG_STATE_COMPLETE_ACK_INIT:
3686                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3687                         ret = ANEG_FAILED;
3688                         break;
3689                 }
3690                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3691                                MR_LP_ADV_HALF_DUPLEX |
3692                                MR_LP_ADV_SYM_PAUSE |
3693                                MR_LP_ADV_ASYM_PAUSE |
3694                                MR_LP_ADV_REMOTE_FAULT1 |
3695                                MR_LP_ADV_REMOTE_FAULT2 |
3696                                MR_LP_ADV_NEXT_PAGE |
3697                                MR_TOGGLE_RX |
3698                                MR_NP_RX);
3699                 if (ap->rxconfig & ANEG_CFG_FD)
3700                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3701                 if (ap->rxconfig & ANEG_CFG_HD)
3702                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3703                 if (ap->rxconfig & ANEG_CFG_PS1)
3704                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3705                 if (ap->rxconfig & ANEG_CFG_PS2)
3706                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3707                 if (ap->rxconfig & ANEG_CFG_RF1)
3708                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3709                 if (ap->rxconfig & ANEG_CFG_RF2)
3710                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3711                 if (ap->rxconfig & ANEG_CFG_NP)
3712                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3713 
3714                 ap->link_time = ap->cur_time;
3715 
3716                 ap->flags ^= (MR_TOGGLE_TX);
3717                 if (ap->rxconfig & 0x0008)
3718                         ap->flags |= MR_TOGGLE_RX;
3719                 if (ap->rxconfig & ANEG_CFG_NP)
3720                         ap->flags |= MR_NP_RX;
3721                 ap->flags |= MR_PAGE_RX;
3722 
3723                 ap->state = ANEG_STATE_COMPLETE_ACK;
3724                 ret = ANEG_TIMER_ENAB;
3725                 break;
3726 
3727         case ANEG_STATE_COMPLETE_ACK:
3728                 if (ap->ability_match != 0 &&
3729                     ap->rxconfig == 0) {
3730                         ap->state = ANEG_STATE_AN_ENABLE;
3731                         break;
3732                 }
3733                 delta = ap->cur_time - ap->link_time;
3734                 if (delta > ANEG_STATE_SETTLE_TIME) {
3735                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3736                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3737                         } else {
3738                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3739                                     !(ap->flags & MR_NP_RX)) {
3740                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3741                                 } else {
3742                                         ret = ANEG_FAILED;
3743                                 }
3744                         }
3745                 }
3746                 break;
3747 
3748         case ANEG_STATE_IDLE_DETECT_INIT:
3749                 ap->link_time = ap->cur_time;
3750                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3751                 tw32_f(MAC_MODE, tp->mac_mode);
3752                 udelay(40);
3753 
3754                 ap->state = ANEG_STATE_IDLE_DETECT;
3755                 ret = ANEG_TIMER_ENAB;
3756                 break;
3757 
3758         case ANEG_STATE_IDLE_DETECT:
3759                 if (ap->ability_match != 0 &&
3760                     ap->rxconfig == 0) {
3761                         ap->state = ANEG_STATE_AN_ENABLE;
3762                         break;
3763                 }
3764                 delta = ap->cur_time - ap->link_time;
3765                 if (delta > ANEG_STATE_SETTLE_TIME) {
3766                         /* XXX another gem from the Broadcom driver :( */
3767                         ap->state = ANEG_STATE_LINK_OK;
3768                 }
3769                 break;
3770 
3771         case ANEG_STATE_LINK_OK:
3772                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3773                 ret = ANEG_DONE;
3774                 break;
3775 
3776         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3777                 /* ??? unimplemented */
3778                 break;
3779 
3780         case ANEG_STATE_NEXT_PAGE_WAIT:
3781                 /* ??? unimplemented */
3782                 break;
3783 
3784         default:
3785                 ret = ANEG_FAILED;
3786                 break;
3787         }
3788 
3789         return ret;
3790 }
3791 
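     /* Run the software autoneg state machine to completion, bounded at
      * roughly 195 ms of 1 us ticks.  Returns 1 on a successfully
      * completed negotiation, 0 otherwise.
      */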
3792 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3793 {
3794         int res = 0;
3795         struct tg3_fiber_aneginfo aninfo;
3796         int status = ANEG_FAILED;
3797         unsigned int tick;
3798         u32 tmp;
3799 
3800         tw32_f(MAC_TX_AUTO_NEG, 0);
3801 
3802         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3803         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3804         udelay(40);
3805 
3806         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3807         udelay(40);
3808 
3809         memset(&aninfo, 0, sizeof(aninfo));
3810         aninfo.flags |= MR_AN_ENABLE;
3811         aninfo.state = ANEG_STATE_UNKNOWN;
3812         aninfo.cur_time = 0;
3813         tick = 0;
3814         while (++tick < 195000) {
3815                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3816                 if (status == ANEG_DONE || status == ANEG_FAILED)
3817                         break;
3818 
3819                 udelay(1);
3820         }
3821 
3822         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3823         tw32_f(MAC_MODE, tp->mac_mode);
3824         udelay(40);
3825 
3826         *txflags = aninfo.txconfig;
3827         *rxflags = aninfo.flags;
3828 
3829         if (status == ANEG_DONE &&
3830             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3831                              MR_LP_ADV_FULL_DUPLEX)))
3832                 res = 1;
3833 
3834         return res;
3835 }
3836 
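     /* One-time init for the BCM8002 SerDes PHY: PLL lock range, comdet
      * enable and a POR cycle, via presumably vendor-supplied register
      * values.
      */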
3837 static void tg3_init_bcm8002(struct tg3 *tp)
3838 {
3839         u32 mac_status = tr32(MAC_STATUS);
3840         int i;
3841 
3842         /* Reset when initting for the first time, or when we have a link. */
3843         if (tg3_flag(tp, INIT_COMPLETE) &&
3844             !(mac_status & MAC_STATUS_PCS_SYNCED))
3845                 return;
3846 
3847         /* Set PLL lock range. */
3848         tg3_writephy(tp, 0x16, 0x8007);
3849 
3850         /* SW reset */
3851         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3852 
3853         /* Wait for reset to complete. */
3854         /* XXX schedule_timeout() ... */
3855         for (i = 0; i < 500; i++)
3856                 udelay(10);
3857 
3858         /* Config mode; select PMA/Ch 1 regs. */
3859         tg3_writephy(tp, 0x10, 0x8411);
3860 
3861         /* Enable auto-lock and comdet, select txclk for tx. */
3862         tg3_writephy(tp, 0x11, 0x0a10);
3863 
3864         tg3_writephy(tp, 0x18, 0x00a0);
3865         tg3_writephy(tp, 0x16, 0x41ff);
3866 
3867         /* Assert and deassert POR. */
3868         tg3_writephy(tp, 0x13, 0x0400);
3869         udelay(40);
3870         tg3_writephy(tp, 0x13, 0x0000);
3871 
3872         tg3_writephy(tp, 0x11, 0x0a50);
3873         udelay(40);
3874         tg3_writephy(tp, 0x11, 0x0a10);
3875 
3876         /* Wait for signal to stabilize */
3877         /* XXX schedule_timeout() ... */
3878         for (i = 0; i < 15000; i++)
3879                 udelay(10);
3880 
3881         /* Deselect the channel register so we can read the PHYID
3882          * later.
3883          */
3884         tg3_writephy(tp, 0x10, 0x8011);
3885 }
3886 
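     /* Fiber link setup using the on-chip SG DIG hardware autoneg
      * block.  Returns nonzero if the link came up.
      */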
3887 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3888 {
3889         u16 flowctrl;
3890         u32 sg_dig_ctrl, sg_dig_status;
3891         u32 serdes_cfg, expected_sg_dig_ctrl;
3892         int workaround, port_a;
3893         int current_link_up;
3894 
3895         serdes_cfg = 0;
3896         expected_sg_dig_ctrl = 0;
3897         workaround = 0;
3898         port_a = 1;
3899         current_link_up = 0;
3900 
3901         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3902             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3903                 workaround = 1;
3904                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3905                         port_a = 0;
3906 
3907                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3908                 /* preserve bits 20-23 for voltage regulator */
3909                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3910         }
3911 
3912         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3913 
3914         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3915                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3916                         if (workaround) {
3917                                 u32 val = serdes_cfg;
3918 
3919                                 if (port_a)
3920                                         val |= 0xc010000;
3921                                 else
3922                                         val |= 0x4010000;
3923                                 tw32_f(MAC_SERDES_CFG, val);
3924                         }
3925 
3926                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3927                 }
3928                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3929                         tg3_setup_flow_control(tp, 0, 0);
3930                         current_link_up = 1;
3931                 }
3932                 goto out;
3933         }
3934 
3935         /* Want auto-negotiation.  */
3936         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3937 
3938         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3939         if (flowctrl & ADVERTISE_1000XPAUSE)
3940                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3941         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3942                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3943 
3944         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3945                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3946                     tp->serdes_counter &&
3947                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3948                                     MAC_STATUS_RCVD_CFG)) ==
3949                      MAC_STATUS_PCS_SYNCED)) {
3950                         tp->serdes_counter--;
3951                         current_link_up = 1;
3952                         goto out;
3953                 }
3954 restart_autoneg:
3955                 if (workaround)
3956                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3957                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3958                 udelay(5);
3959                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3960 
3961                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3962                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3963         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3964                                  MAC_STATUS_SIGNAL_DET)) {
3965                 sg_dig_status = tr32(SG_DIG_STATUS);
3966                 mac_status = tr32(MAC_STATUS);
3967 
3968                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3969                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3970                         u32 local_adv = 0, remote_adv = 0;
3971 
3972                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3973                                 local_adv |= ADVERTISE_1000XPAUSE;
3974                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3975                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3976 
3977                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3978                                 remote_adv |= LPA_1000XPAUSE;
3979                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3980                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3981 
3982                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3983                         current_link_up = 1;
3984                         tp->serdes_counter = 0;
3985                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3987                         if (tp->serdes_counter)
3988                                 tp->serdes_counter--;
3989                         else {
3990                                 if (workaround) {
3991                                         u32 val = serdes_cfg;
3992 
3993                                         if (port_a)
3994                                                 val |= 0xc010000;
3995                                         else
3996                                                 val |= 0x4010000;
3997 
3998                                         tw32_f(MAC_SERDES_CFG, val);
3999                                 }
4000 
4001                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4002                                 udelay(40);
4003 
4004                                 /* Link parallel detection: the link is up
4005                                  * only if we have PCS_SYNC and are not
4006                                  * receiving config code words. */
4007                                 mac_status = tr32(MAC_STATUS);
4008                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4009                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4010                                         tg3_setup_flow_control(tp, 0, 0);
4011                                         current_link_up = 1;
4012                                         tp->phy_flags |=
4013                                                 TG3_PHYFLG_PARALLEL_DETECT;
4014                                         tp->serdes_counter =
4015                                                 SERDES_PARALLEL_DET_TIMEOUT;
4016                                 } else
4017                                         goto restart_autoneg;
4018                         }
4019                 }
4020         } else {
4021                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4022                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4023         }
4024 
4025 out:
4026         return current_link_up;
4027 }
4028 
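     /* Fiber link setup without the SG DIG block: run the software
      * autoneg state machine, or simply force a 1000FD link when
      * autoneg is disabled.
      */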
4029 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4030 {
4031         int current_link_up = 0;
4032 
4033         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4034                 goto out;
4035 
4036         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4037                 u32 txflags, rxflags;
4038                 int i;
4039 
4040                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4041                         u32 local_adv = 0, remote_adv = 0;
4042 
4043                         if (txflags & ANEG_CFG_PS1)
4044                                 local_adv |= ADVERTISE_1000XPAUSE;
4045                         if (txflags & ANEG_CFG_PS2)
4046                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4047 
4048                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4049                                 remote_adv |= LPA_1000XPAUSE;
4050                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4051                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4052 
4053                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4054 
4055                         current_link_up = 1;
4056                 }
4057                 for (i = 0; i < 30; i++) {
4058                         udelay(20);
4059                         tw32_f(MAC_STATUS,
4060                                (MAC_STATUS_SYNC_CHANGED |
4061                                 MAC_STATUS_CFG_CHANGED));
4062                         udelay(40);
4063                         if ((tr32(MAC_STATUS) &
4064                              (MAC_STATUS_SYNC_CHANGED |
4065                               MAC_STATUS_CFG_CHANGED)) == 0)
4066                                 break;
4067                 }
4068 
4069                 mac_status = tr32(MAC_STATUS);
4070                 if (current_link_up == 0 &&
4071                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4072                     !(mac_status & MAC_STATUS_RCVD_CFG))
4073                         current_link_up = 1;
4074         } else {
4075                 tg3_setup_flow_control(tp, 0, 0);
4076 
4077                 /* Forcing 1000FD link up. */
4078                 current_link_up = 1;
4079 
4080                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4081                 udelay(40);
4082 
4083                 tw32_f(MAC_MODE, tp->mac_mode);
4084                 udelay(40);
4085         }
4086 
4087 out:
4088         return current_link_up;
4089 }
4090 
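     /* Top-level link setup for TBI/fiber ports.  Bails out early when
      * the link is already up and stable, otherwise re-runs autoneg and
      * updates the LEDs and carrier state.
      */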
4091 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4092 {
4093         u32 orig_pause_cfg;
4094         u16 orig_active_speed;
4095         u8 orig_active_duplex;
4096         u32 mac_status;
4097         int current_link_up;
4098         int i;
4099 
4100         orig_pause_cfg = tp->link_config.active_flowctrl;
4101         orig_active_speed = tp->link_config.active_speed;
4102         orig_active_duplex = tp->link_config.active_duplex;
4103 
4104         if (!tg3_flag(tp, HW_AUTONEG) &&
4105             netif_carrier_ok(tp->dev) &&
4106             tg3_flag(tp, INIT_COMPLETE)) {
4107                 mac_status = tr32(MAC_STATUS);
4108                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4109                                MAC_STATUS_SIGNAL_DET |
4110                                MAC_STATUS_CFG_CHANGED |
4111                                MAC_STATUS_RCVD_CFG);
4112                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4113                                    MAC_STATUS_SIGNAL_DET)) {
4114                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4115                                             MAC_STATUS_CFG_CHANGED));
4116                         return 0;
4117                 }
4118         }
4119 
4120         tw32_f(MAC_TX_AUTO_NEG, 0);
4121 
4122         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4123         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4124         tw32_f(MAC_MODE, tp->mac_mode);
4125         udelay(40);
4126 
4127         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4128                 tg3_init_bcm8002(tp);
4129 
4130         /* Enable link change event even when serdes polling.  */
4131         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4132         udelay(40);
4133 
4134         current_link_up = 0;
4135         mac_status = tr32(MAC_STATUS);
4136 
4137         if (tg3_flag(tp, HW_AUTONEG))
4138                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4139         else
4140                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4141 
4142         tp->napi[0].hw_status->status =
4143                 (SD_STATUS_UPDATED |
4144                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4145 
4146         for (i = 0; i < 100; i++) {
4147                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4148                                     MAC_STATUS_CFG_CHANGED));
4149                 udelay(5);
4150                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4151                                          MAC_STATUS_CFG_CHANGED |
4152                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4153                         break;
4154         }
4155 
4156         mac_status = tr32(MAC_STATUS);
4157         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4158                 current_link_up = 0;
4159                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4160                     tp->serdes_counter == 0) {
4161                         tw32_f(MAC_MODE, (tp->mac_mode |
4162                                           MAC_MODE_SEND_CONFIGS));
4163                         udelay(1);
4164                         tw32_f(MAC_MODE, tp->mac_mode);
4165                 }
4166         }
4167 
4168         if (current_link_up == 1) {
4169                 tp->link_config.active_speed = SPEED_1000;
4170                 tp->link_config.active_duplex = DUPLEX_FULL;
4171                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4172                                     LED_CTRL_LNKLED_OVERRIDE |
4173                                     LED_CTRL_1000MBPS_ON));
4174         } else {
4175                 tp->link_config.active_speed = SPEED_INVALID;
4176                 tp->link_config.active_duplex = DUPLEX_INVALID;
4177                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4178                                     LED_CTRL_LNKLED_OVERRIDE |
4179                                     LED_CTRL_TRAFFIC_OVERRIDE));
4180         }
4181 
4182         if (current_link_up != netif_carrier_ok(tp->dev)) {
4183                 if (current_link_up)
4184                         netif_carrier_on(tp->dev);
4185                 else
4186                         netif_carrier_off(tp->dev);
4187                 tg3_link_report(tp);
4188         } else {
4189                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4190                 if (orig_pause_cfg != now_pause_cfg ||
4191                     orig_active_speed != tp->link_config.active_speed ||
4192                     orig_active_duplex != tp->link_config.active_duplex)
4193                         tg3_link_report(tp);
4194         }
4195 
4196         return 0;
4197 }
4198 
4199 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4200 {
4201         int current_link_up, err = 0;
4202         u32 bmsr, bmcr;
4203         u16 current_speed;
4204         u8 current_duplex;
4205         u32 local_adv, remote_adv;
4206 
4207         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4208         tw32_f(MAC_MODE, tp->mac_mode);
4209         udelay(40);
4210 
4211         tw32(MAC_EVENT, 0);
4212 
4213         tw32_f(MAC_STATUS,
4214              (MAC_STATUS_SYNC_CHANGED |
4215               MAC_STATUS_CFG_CHANGED |
4216               MAC_STATUS_MI_COMPLETION |
4217               MAC_STATUS_LNKSTATE_CHANGED));
4218         udelay(40);
4219 
4220         if (force_reset)
4221                 tg3_phy_reset(tp);
4222 
4223         current_link_up = 0;
4224         current_speed = SPEED_INVALID;
4225         current_duplex = DUPLEX_INVALID;
4226 
4227         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4228         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4230                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4231                         bmsr |= BMSR_LSTATUS;
4232                 else
4233                         bmsr &= ~BMSR_LSTATUS;
4234         }
4235 
4236         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4237 
4238         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4239             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4240                 /* do nothing, just check for link up at the end */
4241         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4242                 u32 adv, new_adv;
4243 
4244                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4245                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4246                                   ADVERTISE_1000XPAUSE |
4247                                   ADVERTISE_1000XPSE_ASYM |
4248                                   ADVERTISE_SLCT);
4249 
4250                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4251 
4252                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4253                         new_adv |= ADVERTISE_1000XHALF;
4254                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4255                         new_adv |= ADVERTISE_1000XFULL;
4256 
4257                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4258                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4259                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4260                         tg3_writephy(tp, MII_BMCR, bmcr);
4261 
4262                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4263                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4264                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4265 
4266                         return err;
4267                 }
4268         } else {
4269                 u32 new_bmcr;
4270 
4271                 bmcr &= ~BMCR_SPEED1000;
4272                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4273 
4274                 if (tp->link_config.duplex == DUPLEX_FULL)
4275                         new_bmcr |= BMCR_FULLDPLX;
4276 
4277                 if (new_bmcr != bmcr) {
4278                         /* BMCR_SPEED1000 is a reserved bit that needs
4279                          * to be set on write.
4280                          */
4281                         new_bmcr |= BMCR_SPEED1000;
4282 
4283                         /* Force a linkdown */
4284                         if (netif_carrier_ok(tp->dev)) {
4285                                 u32 adv;
4286 
4287                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4288                                 adv &= ~(ADVERTISE_1000XFULL |
4289                                          ADVERTISE_1000XHALF |
4290                                          ADVERTISE_SLCT);
4291                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4292                                 tg3_writephy(tp, MII_BMCR, bmcr |
4293                                                            BMCR_ANRESTART |
4294                                                            BMCR_ANENABLE);
4295                                 udelay(10);
4296                                 netif_carrier_off(tp->dev);
4297                         }
4298                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4299                         bmcr = new_bmcr;
4300                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4301                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4303                             ASIC_REV_5714) {
4304                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4305                                         bmsr |= BMSR_LSTATUS;
4306                                 else
4307                                         bmsr &= ~BMSR_LSTATUS;
4308                         }
4309                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4310                 }
4311         }
4312 
4313         if (bmsr & BMSR_LSTATUS) {
4314                 current_speed = SPEED_1000;
4315                 current_link_up = 1;
4316                 if (bmcr & BMCR_FULLDPLX)
4317                         current_duplex = DUPLEX_FULL;
4318                 else
4319                         current_duplex = DUPLEX_HALF;
4320 
4321                 local_adv = 0;
4322                 remote_adv = 0;
4323 
4324                 if (bmcr & BMCR_ANENABLE) {
4325                         u32 common;
4326 
4327                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4328                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4329                         common = local_adv & remote_adv;
4330                         if (common & (ADVERTISE_1000XHALF |
4331                                       ADVERTISE_1000XFULL)) {
4332                                 if (common & ADVERTISE_1000XFULL)
4333                                         current_duplex = DUPLEX_FULL;
4334                                 else
4335                                         current_duplex = DUPLEX_HALF;
4336                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4337                                 /* Link is up via parallel detect */
4338                         } else {
4339                                 current_link_up = 0;
4340                         }
4341                 }
4342         }
4343 
4344         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4345                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4346 
4347         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4348         if (tp->link_config.active_duplex == DUPLEX_HALF)
4349                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4350 
4351         tw32_f(MAC_MODE, tp->mac_mode);
4352         udelay(40);
4353 
4354         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4355 
4356         tp->link_config.active_speed = current_speed;
4357         tp->link_config.active_duplex = current_duplex;
4358 
4359         if (current_link_up != netif_carrier_ok(tp->dev)) {
4360                 if (current_link_up)
4361                         netif_carrier_on(tp->dev);
4362                 else {
4363                         netif_carrier_off(tp->dev);
4364                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4365                 }
4366                 tg3_link_report(tp);
4367         }
4368         return err;
4369 }
4370 
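     /* Called repeatedly from the driver's periodic path.  Once the
      * autoneg grace period (tp->serdes_counter) has expired with the
      * link still down, a PHY reporting signal detect but no incoming
      * config code words is forced to 1000/FULL and flagged as linked
      * by parallel detection.  If config code words later appear on
      * such a link, autoneg is switched back on and the flag cleared.
      */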
4371 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4372 {
4373         if (tp->serdes_counter) {
4374                 /* Give autoneg time to complete. */
4375                 tp->serdes_counter--;
4376                 return;
4377         }
4378 
4379         if (!netif_carrier_ok(tp->dev) &&
4380             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4381                 u32 bmcr;
4382 
4383                 tg3_readphy(tp, MII_BMCR, &bmcr);
4384                 if (bmcr & BMCR_ANENABLE) {
4385                         u32 phy1, phy2;
4386 
4387                         /* Select shadow register 0x1f */
4388                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4389                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4390 
4391                         /* Select expansion interrupt status register */
4392                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4393                                          MII_TG3_DSP_EXP1_INT_STAT);
4394                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4395                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 
4397                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4398                                 /* We have signal detect and are not
4399                                  * receiving config code words; the link
4400                                  * is up by parallel detection.
4401                                  */
4402 
4403                                 bmcr &= ~BMCR_ANENABLE;
4404                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4405                                 tg3_writephy(tp, MII_BMCR, bmcr);
4406                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4407                         }
4408                 }
4409         } else if (netif_carrier_ok(tp->dev) &&
4410                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4411                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4412                 u32 phy2;
4413 
4414                 /* Select expansion interrupt status register */
4415                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4416                                  MII_TG3_DSP_EXP1_INT_STAT);
4417                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4418                 if (phy2 & 0x20) {
4419                         u32 bmcr;
4420 
4421                         /* Config code words received, turn on autoneg. */
4422                         tg3_readphy(tp, MII_BMCR, &bmcr);
4423                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4424 
4425                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426 
4427                 }
4428         }
4429 }
4430 
4431 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4432 {
4433         u32 val;
4434         int err;
4435 
4436         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4437                 err = tg3_setup_fiber_phy(tp, force_reset);
4438         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4439                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4440         else
4441                 err = tg3_setup_copper_phy(tp, force_reset);
4442 
4443         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4444                 u32 scale;
4445 
4446                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4447                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4448                         scale = 65;
4449                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4450                         scale = 6;
4451                 else
4452                         scale = 12;
4453 
4454                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4455                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4456                 tw32(GRC_MISC_CFG, val);
4457         }
4458 
4459         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4460               (6 << TX_LENGTHS_IPG_SHIFT);
4461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4462                 val |= tr32(MAC_TX_LENGTHS) &
4463                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4464                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4465 
4466         if (tp->link_config.active_speed == SPEED_1000 &&
4467             tp->link_config.active_duplex == DUPLEX_HALF)
4468                 tw32(MAC_TX_LENGTHS, val |
4469                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4470         else
4471                 tw32(MAC_TX_LENGTHS, val |
4472                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4473 
4474         if (!tg3_flag(tp, 5705_PLUS)) {
4475                 if (netif_carrier_ok(tp->dev)) {
4476                         tw32(HOSTCC_STAT_COAL_TICKS,
4477                              tp->coal.stats_block_coalesce_usecs);
4478                 } else {
4479                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4480                 }
4481         }
4482 
4483         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4484                 val = tr32(PCIE_PWR_MGMT_THRESH);
4485                 if (!netif_carrier_ok(tp->dev))
4486                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4487                               tp->pwrmgmt_thresh;
4488                 else
4489                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4490                 tw32(PCIE_PWR_MGMT_THRESH, val);
4491         }
4492 
4493         return err;
4494 }
4495 
4496 static inline int tg3_irq_sync(struct tg3 *tp)
4497 {
4498         return tp->irq_sync;
4499 }
4500 
4501 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4502 {
4503         int i;
4504 
4505         dst = (u32 *)((u8 *)dst + off);
4506         for (i = 0; i < len; i += sizeof(u32))
4507                 *dst++ = tr32(off + i);
4508 }
4509 
4510 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4511 {
4512         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4513         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4514         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4515         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4516         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4517         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4518         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4519         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4520         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4521         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4522         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4523         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4524         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4525         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4526         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4527         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4528         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4529         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4530         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4531 
4532         if (tg3_flag(tp, SUPPORT_MSIX))
4533                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4534 
4535         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4536         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4537         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4538         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4539         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4540         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4541         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4542         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4543 
4544         if (!tg3_flag(tp, 5705_PLUS)) {
4545                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4546                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4547                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4548         }
4549 
4550         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4551         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4552         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4553         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4554         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4555 
4556         if (tg3_flag(tp, NVRAM))
4557                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4558 }
4559 
4560 static void tg3_dump_state(struct tg3 *tp)
4561 {
4562         int i;
4563         u32 *regs;
4564 
4565         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4566         if (!regs) {
4567                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4568                 return;
4569         }
4570 
4571         if (tg3_flag(tp, PCI_EXPRESS)) {
4572                 /* Read up to but not including private PCI registers */
4573                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4574                         regs[i / sizeof(u32)] = tr32(i);
4575         } else
4576                 tg3_dump_legacy_regs(tp, regs);
4577 
4578         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4579                 if (!regs[i + 0] && !regs[i + 1] &&
4580                     !regs[i + 2] && !regs[i + 3])
4581                         continue;
4582 
4583                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4584                            i * 4,
4585                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4586         }
4587 
4588         kfree(regs);
4589 
4590         for (i = 0; i < tp->irq_cnt; i++) {
4591                 struct tg3_napi *tnapi = &tp->napi[i];
4592 
4593                 /* SW status block */
4594                 netdev_err(tp->dev,
4595                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4596                            i,
4597                            tnapi->hw_status->status,
4598                            tnapi->hw_status->status_tag,
4599                            tnapi->hw_status->rx_jumbo_consumer,
4600                            tnapi->hw_status->rx_consumer,
4601                            tnapi->hw_status->rx_mini_consumer,
4602                            tnapi->hw_status->idx[0].rx_producer,
4603                            tnapi->hw_status->idx[0].tx_consumer);
4604 
4605                 netdev_err(tp->dev,
4606                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4607                            i,
4608                            tnapi->last_tag, tnapi->last_irq_tag,
4609                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4610                            tnapi->rx_rcb_ptr,
4611                            tnapi->prodring.rx_std_prod_idx,
4612                            tnapi->prodring.rx_std_cons_idx,
4613                            tnapi->prodring.rx_jmb_prod_idx,
4614                            tnapi->prodring.rx_jmb_cons_idx);
4615         }
4616 }
4617 
4618 /* This is called whenever we suspect that the system chipset is re-
4619  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4620  * is bogus tx completions. We try to recover by setting the
4621  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4622  * in the workqueue.
4623  */
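     /* TG3_FLAG_TX_RECOVERY_PENDING is checked in tg3_poll_work(); the
      * NAPI poll paths then complete NAPI and schedule tp->reset_task,
      * which performs the actual chip reset.
      */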
4624 static void tg3_tx_recover(struct tg3 *tp)
4625 {
4626         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4627                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4628 
4629         netdev_warn(tp->dev,
4630                     "The system may be re-ordering memory-mapped I/O "
4631                     "cycles to the network device, attempting to recover. "
4632                     "Please report the problem to the driver maintainer "
4633                     "and include system chipset information.\n");
4634 
4635         spin_lock(&tp->lock);
4636         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4637         spin_unlock(&tp->lock);
4638 }
4639 
4640 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4641 {
4642         /* Tell compiler to fetch tx indices from memory. */
4643         barrier();
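             /* tx_prod and tx_cons wrap at the ring size (a power of
              * two, 512 entries as defined by TG3_TX_RING_SIZE in
              * tg3.h), so the masked difference counts descriptors in
              * flight even across the wrap: e.g. tx_prod = 10 and
              * tx_cons = 500 gives (10 - 500) & 511 = 22 outstanding.
              */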
4644         return tnapi->tx_pending -
4645                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4646 }
4647 
4648 /* Tigon3 never reports partial packet sends.  So we do not
4649  * need special logic to handle SKBs that have not had all
4650  * of their frags sent yet, like SunGEM does.
4651  */
4652 static void tg3_tx(struct tg3_napi *tnapi)
4653 {
4654         struct tg3 *tp = tnapi->tp;
4655         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4656         u32 sw_idx = tnapi->tx_cons;
4657         struct netdev_queue *txq;
4658         int index = tnapi - tp->napi;
4659 
4660         if (tg3_flag(tp, ENABLE_TSS))
4661                 index--;
4662 
4663         txq = netdev_get_tx_queue(tp->dev, index);
4664 
4665         while (sw_idx != hw_idx) {
4666                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4667                 struct sk_buff *skb = ri->skb;
4668                 int i, tx_bug = 0;
4669 
4670                 if (unlikely(skb == NULL)) {
4671                         tg3_tx_recover(tp);
4672                         return;
4673                 }
4674 
4675                 pci_unmap_single(tp->pdev,
4676                                  dma_unmap_addr(ri, mapping),
4677                                  skb_headlen(skb),
4678                                  PCI_DMA_TODEVICE);
4679 
4680                 ri->skb = NULL;
4681 
4682                 sw_idx = NEXT_TX(sw_idx);
4683 
4684                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4685                         ri = &tnapi->tx_buffers[sw_idx];
4686                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4687                                 tx_bug = 1;
4688 
4689                         pci_unmap_page(tp->pdev,
4690                                        dma_unmap_addr(ri, mapping),
4691                                        skb_shinfo(skb)->frags[i].size,
4692                                        PCI_DMA_TODEVICE);
4693                         sw_idx = NEXT_TX(sw_idx);
4694                 }
4695 
4696                 dev_kfree_skb(skb);
4697 
4698                 if (unlikely(tx_bug)) {
4699                         tg3_tx_recover(tp);
4700                         return;
4701                 }
4702         }
4703 
4704         tnapi->tx_cons = sw_idx;
4705 
4706         /* Need to make the tx_cons update visible to tg3_start_xmit()
4707          * before checking for netif_queue_stopped().  Without the
4708          * memory barrier, there is a small possibility that tg3_start_xmit()
4709          * will miss it and cause the queue to be stopped forever.
4710          */
4711         smp_mb();
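             /* tg3_start_xmit() runs the mirror sequence: it stops the
              * queue, issues its own memory barrier and then re-checks
              * tg3_tx_avail(), so whichever side runs second sees the
              * other's update and the queue cannot stay stopped while
              * slots are free.
              */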
4712 
4713         if (unlikely(netif_tx_queue_stopped(txq) &&
4714                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4715                 __netif_tx_lock(txq, smp_processor_id());
4716                 if (netif_tx_queue_stopped(txq) &&
4717                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4718                         netif_tx_wake_queue(txq);
4719                 __netif_tx_unlock(txq);
4720         }
4721 }
4722 
4723 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4724 {
4725         if (!ri->skb)
4726                 return;
4727 
4728         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4729                          map_sz, PCI_DMA_FROMDEVICE);
4730         dev_kfree_skb_any(ri->skb);
4731         ri->skb = NULL;
4732 }
4733 
4734 /* Returns size of skb allocated or < 0 on error.
4735  *
4736  * We only need to fill in the address because the other members
4737  * of the RX descriptor are invariant, see tg3_init_rings.
4738  *
4739  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4740  * posting buffers we only dirty the first cache line of the RX
4741  * descriptor (containing the address).  Whereas for the RX status
4742  * buffers the cpu only reads the last cacheline of the RX descriptor
4743  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4744  */
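     /* Below, only desc->addr_hi and desc->addr_lo are written; the
      * error flags, VLAN tag, checksum and opaque cookie consumed by
      * tg3_rx() sit at the tail of the descriptor and are filled in
      * by the chip.
      */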
4745 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4746                             u32 opaque_key, u32 dest_idx_unmasked)
4747 {
4748         struct tg3_rx_buffer_desc *desc;
4749         struct ring_info *map;
4750         struct sk_buff *skb;
4751         dma_addr_t mapping;
4752         int skb_size, dest_idx;
4753 
4754         switch (opaque_key) {
4755         case RXD_OPAQUE_RING_STD:
4756                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4757                 desc = &tpr->rx_std[dest_idx];
4758                 map = &tpr->rx_std_buffers[dest_idx];
4759                 skb_size = tp->rx_pkt_map_sz;
4760                 break;
4761 
4762         case RXD_OPAQUE_RING_JUMBO:
4763                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4764                 desc = &tpr->rx_jmb[dest_idx].std;
4765                 map = &tpr->rx_jmb_buffers[dest_idx];
4766                 skb_size = TG3_RX_JMB_MAP_SZ;
4767                 break;
4768 
4769         default:
4770                 return -EINVAL;
4771         }
4772 
4773         /* Do not overwrite any of the map or rp information
4774          * until we are sure we can commit to a new buffer.
4775          *
4776          * Callers depend upon this behavior and assume that
4777          * we leave everything unchanged if we fail.
4778          */
4779         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4780         if (skb == NULL)
4781                 return -ENOMEM;
4782 
4783         skb_reserve(skb, tp->rx_offset);
4784 
4785         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4786                                  PCI_DMA_FROMDEVICE);
4787         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4788                 dev_kfree_skb(skb);
4789                 return -EIO;
4790         }
4791 
4792         map->skb = skb;
4793         dma_unmap_addr_set(map, mapping, mapping);
4794 
4795         desc->addr_hi = ((u64)mapping >> 32);
4796         desc->addr_lo = ((u64)mapping & 0xffffffff);
4797 
4798         return skb_size;
4799 }
4800 
4801 /* We only need to move over in the address because the other
4802  * members of the RX descriptor are invariant.  See notes above
4803  * tg3_alloc_rx_skb for full details.
4804  */
4805 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4806                            struct tg3_rx_prodring_set *dpr,
4807                            u32 opaque_key, int src_idx,
4808                            u32 dest_idx_unmasked)
4809 {
4810         struct tg3 *tp = tnapi->tp;
4811         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4812         struct ring_info *src_map, *dest_map;
4813         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4814         int dest_idx;
4815 
4816         switch (opaque_key) {
4817         case RXD_OPAQUE_RING_STD:
4818                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4819                 dest_desc = &dpr->rx_std[dest_idx];
4820                 dest_map = &dpr->rx_std_buffers[dest_idx];
4821                 src_desc = &spr->rx_std[src_idx];
4822                 src_map = &spr->rx_std_buffers[src_idx];
4823                 break;
4824 
4825         case RXD_OPAQUE_RING_JUMBO:
4826                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4827                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4828                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4829                 src_desc = &spr->rx_jmb[src_idx].std;
4830                 src_map = &spr->rx_jmb_buffers[src_idx];
4831                 break;
4832 
4833         default:
4834                 return;
4835         }
4836 
4837         dest_map->skb = src_map->skb;
4838         dma_unmap_addr_set(dest_map, mapping,
4839                            dma_unmap_addr(src_map, mapping));
4840         dest_desc->addr_hi = src_desc->addr_hi;
4841         dest_desc->addr_lo = src_desc->addr_lo;
4842 
4843         /* Ensure that the update to the skb happens after the physical
4844          * addresses have been transferred to the new BD location.
4845          */
4846         smp_wmb();
4847 
4848         src_map->skb = NULL;
4849 }
4850 
4851 /* The RX ring scheme is composed of multiple rings which post fresh
4852  * buffers to the chip, and one special ring the chip uses to report
4853  * status back to the host.
4854  *
4855  * The special ring reports the status of received packets to the
4856  * host.  The chip does not write into the original descriptor the
4857  * RX buffer was obtained from.  The chip simply takes the original
4858  * descriptor as provided by the host, updates the status and length
4859  * fields, then writes this into the next status ring entry.
4860  *
4861  * Each ring the host uses to post buffers to the chip is described
4862  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4863  * it is first placed into the on-chip RAM.  When the packet's length
4864  * is known, it walks down the TG3_BDINFO entries to select the ring.
4865  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
4866  * whose MAXLEN covers the new packet's length is chosen.
4867  *
4868  * The "separate ring for rx status" scheme may sound queer, but it makes
4869  * sense from a cache coherency perspective.  If only the host writes
4870  * to the buffer post rings, and only the chip writes to the rx status
4871  * rings, then cache lines never move beyond shared-modified state.
4872  * If both the host and chip were to write into the same ring, cache line
4873  * eviction could occur since both entities want it in an exclusive state.
4874  */
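     /* Roughly:
      *
      *   host  --posts buffers-->  std/jumbo producer rings  --> chip
      *   chip  --status+length-->  rx return (status) ring   --> host
      *
      * so every ring has exactly one writer.
      */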
4875 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4876 {
4877         struct tg3 *tp = tnapi->tp;
4878         u32 work_mask, rx_std_posted = 0;
4879         u32 std_prod_idx, jmb_prod_idx;
4880         u32 sw_idx = tnapi->rx_rcb_ptr;
4881         u16 hw_idx;
4882         int received;
4883         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4884 
4885         hw_idx = *(tnapi->rx_rcb_prod_idx);
4886         /*
4887          * We need to order the read of hw_idx and the read of the
4888          * opaque cookie; else we may see a half-written descriptor.
4889          */
4890         rmb();
4891         work_mask = 0;
4892         received = 0;
4893         std_prod_idx = tpr->rx_std_prod_idx;
4894         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4895         while (sw_idx != hw_idx && budget > 0) {
4896                 struct ring_info *ri;
4897                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4898                 unsigned int len;
4899                 struct sk_buff *skb;
4900                 dma_addr_t dma_addr;
4901                 u32 opaque_key, desc_idx, *post_ptr;
4902 
4903                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4904                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4905                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4906                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4907                         dma_addr = dma_unmap_addr(ri, mapping);
4908                         skb = ri->skb;
4909                         post_ptr = &std_prod_idx;
4910                         rx_std_posted++;
4911                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4912                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4913                         dma_addr = dma_unmap_addr(ri, mapping);
4914                         skb = ri->skb;
4915                         post_ptr = &jmb_prod_idx;
4916                 } else
4917                         goto next_pkt_nopost;
4918 
4919                 work_mask |= opaque_key;
4920 
4921                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4922                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4923                 drop_it:
4924                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4925                                        desc_idx, *post_ptr);
4926                 drop_it_no_recycle:
4927                         /* Other statistics are kept track of by the card. */
4928                         tp->rx_dropped++;
4929                         goto next_pkt;
4930                 }
4931 
4932                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4933                       ETH_FCS_LEN;
4934 
4935                 if (len > TG3_RX_COPY_THRESH(tp)) {
4936                         int skb_size;
4937 
4938                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4939                                                     *post_ptr);
4940                         if (skb_size < 0)
4941                                 goto drop_it;
4942 
4943                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4944                                          PCI_DMA_FROMDEVICE);
4945 
4946                         /* Ensure that the update to the skb happens
4947                          * after the usage of the old DMA mapping.
4948                          */
4949                         smp_wmb();
4950 
4951                         ri->skb = NULL;
4952 
4953                         skb_put(skb, len);
4954                 } else {
4955                         struct sk_buff *copy_skb;
4956 
4957                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4958                                        desc_idx, *post_ptr);
4959 
4960                         copy_skb = netdev_alloc_skb(tp->dev, len +
4961                                                     TG3_RAW_IP_ALIGN);
4962                         if (copy_skb == NULL)
4963                                 goto drop_it_no_recycle;
4964 
4965                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4966                         skb_put(copy_skb, len);
4967                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4968                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4969                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4970 
4971                         /* We'll reuse the original ring buffer. */
4972                         skb = copy_skb;
4973                 }
4974 
4975                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4976                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4977                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4978                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4979                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4980                 else
4981                         skb_checksum_none_assert(skb);
4982 
4983                 skb->protocol = eth_type_trans(skb, tp->dev);
4984 
4985                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4986                     skb->protocol != htons(ETH_P_8021Q)) {
4987                         dev_kfree_skb(skb);
4988                         goto drop_it_no_recycle;
4989                 }
4990 
4991                 if (desc->type_flags & RXD_FLAG_VLAN &&
4992                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4993                         __vlan_hwaccel_put_tag(skb,
4994                                                desc->err_vlan & RXD_VLAN_MASK);
4995 
4996                 napi_gro_receive(&tnapi->napi, skb);
4997 
4998                 received++;
4999                 budget--;
5000 
5001 next_pkt:
5002                 (*post_ptr)++;
5003 
5004                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5005                         tpr->rx_std_prod_idx = std_prod_idx &
5006                                                tp->rx_std_ring_mask;
5007                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5008                                      tpr->rx_std_prod_idx);
5009                         work_mask &= ~RXD_OPAQUE_RING_STD;
5010                         rx_std_posted = 0;
5011                 }
5012 next_pkt_nopost:
5013                 sw_idx++;
5014                 sw_idx &= tp->rx_ret_ring_mask;
5015 
5016                 /* Refresh hw_idx to see if there is new work */
5017                 if (sw_idx == hw_idx) {
5018                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5019                         rmb();
5020                 }
5021         }
5022 
5023         /* ACK the status ring. */
5024         tnapi->rx_rcb_ptr = sw_idx;
5025         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5026 
5027         /* Refill RX ring(s). */
5028         if (!tg3_flag(tp, ENABLE_RSS)) {
5029                 if (work_mask & RXD_OPAQUE_RING_STD) {
5030                         tpr->rx_std_prod_idx = std_prod_idx &
5031                                                tp->rx_std_ring_mask;
5032                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5033                                      tpr->rx_std_prod_idx);
5034                 }
5035                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5036                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5037                                                tp->rx_jmb_ring_mask;
5038                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5039                                      tpr->rx_jmb_prod_idx);
5040                 }
5041                 mmiowb();
5042         } else if (work_mask) {
5043                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5044                  * updated before the producer indices can be updated.
5045                  */
5046                 smp_wmb();
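                     /* Pairs with the smp_rmb() in tg3_rx_prodring_xfer(),
                      * which reads the producer index first and only then
                      * inspects the ring entries.
                      */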
5047 
5048                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5049                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5050 
5051                 if (tnapi != &tp->napi[1])
5052                         napi_schedule(&tp->napi[1].napi);
5053         }
5054 
5055         return received;
5056 }
5057 
5058 static void tg3_poll_link(struct tg3 *tp)
5059 {
5060         /* handle link change and other phy events */
5061         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5062                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5063 
5064                 if (sblk->status & SD_STATUS_LINK_CHG) {
5065                         sblk->status = SD_STATUS_UPDATED |
5066                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5067                         spin_lock(&tp->lock);
5068                         if (tg3_flag(tp, USE_PHYLIB)) {
5069                                 tw32_f(MAC_STATUS,
5070                                      (MAC_STATUS_SYNC_CHANGED |
5071                                       MAC_STATUS_CFG_CHANGED |
5072                                       MAC_STATUS_MI_COMPLETION |
5073                                       MAC_STATUS_LNKSTATE_CHANGED));
5074                                 udelay(40);
5075                         } else
5076                                 tg3_setup_phy(tp, 0);
5077                         spin_unlock(&tp->lock);
5078                 }
5079         }
5080 }
5081 
5082 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5083                                 struct tg3_rx_prodring_set *dpr,
5084                                 struct tg3_rx_prodring_set *spr)
5085 {
5086         u32 si, di, cpycnt, src_prod_idx;
5087         int i, err = 0;
5088 
5089         while (1) {
5090                 src_prod_idx = spr->rx_std_prod_idx;
5091 
5092                 /* Make sure updates to the rx_std_buffers[] entries and the
5093                  * standard producer index are seen in the correct order.
5094                  */
5095                 smp_rmb();
5096 
5097                 if (spr->rx_std_cons_idx == src_prod_idx)
5098                         break;
5099 
5100                 if (spr->rx_std_cons_idx < src_prod_idx)
5101                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5102                 else
5103                         cpycnt = tp->rx_std_ring_mask + 1 -
5104                                  spr->rx_std_cons_idx;
5105 
5106                 cpycnt = min(cpycnt,
5107                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
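                 /* cpycnt is clamped so neither ring wraps mid-copy.
                  * E.g. with a 512-entry ring, cons_idx = 500 and
                  * prod_idx = 10 copies 12 entries up to the end of the
                  * ring; the remaining 10 are handled on the next pass
                  * of the loop.
                  */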
5108 
5109                 si = spr->rx_std_cons_idx;
5110                 di = dpr->rx_std_prod_idx;
5111 
5112                 for (i = di; i < di + cpycnt; i++) {
5113                         if (dpr->rx_std_buffers[i].skb) {
5114                                 cpycnt = i - di;
5115                                 err = -ENOSPC;
5116                                 break;
5117                         }
5118                 }
5119 
5120                 if (!cpycnt)
5121                         break;
5122 
5123                 /* Ensure that updates to the rx_std_buffers ring and the
5124                  * shadowed hardware producer ring from tg3_recycle_skb() are
5125                  * ordered correctly WRT the skb check above.
5126                  */
5127                 smp_rmb();
5128 
5129                 memcpy(&dpr->rx_std_buffers[di],
5130                        &spr->rx_std_buffers[si],
5131                        cpycnt * sizeof(struct ring_info));
5132 
5133                 for (i = 0; i < cpycnt; i++, di++, si++) {
5134                         struct tg3_rx_buffer_desc *sbd, *dbd;
5135                         sbd = &spr->rx_std[si];
5136                         dbd = &dpr->rx_std[di];
5137                         dbd->addr_hi = sbd->addr_hi;
5138                         dbd->addr_lo = sbd->addr_lo;
5139                 }
5140 
5141                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5142                                        tp->rx_std_ring_mask;
5143                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5144                                        tp->rx_std_ring_mask;
5145         }
5146 
5147         while (1) {
5148                 src_prod_idx = spr->rx_jmb_prod_idx;
5149 
5150                 /* Make sure updates to the rx_jmb_buffers[] entries and
5151                  * the jumbo producer index are seen in the correct order.
5152                  */
5153                 smp_rmb();
5154 
5155                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5156                         break;
5157 
5158                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5159                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5160                 else
5161                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5162                                  spr->rx_jmb_cons_idx;
5163 
5164                 cpycnt = min(cpycnt,
5165                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5166 
5167                 si = spr->rx_jmb_cons_idx;
5168                 di = dpr->rx_jmb_prod_idx;
5169 
5170                 for (i = di; i < di + cpycnt; i++) {
5171                         if (dpr->rx_jmb_buffers[i].skb) {
5172                                 cpycnt = i - di;
5173                                 err = -ENOSPC;
5174                                 break;
5175                         }
5176                 }
5177 
5178                 if (!cpycnt)
5179                         break;
5180 
5181                 /* Ensure that updates to the rx_jmb_buffers ring and the
5182                  * shadowed hardware producer ring from tg3_recycle_skb() are
5183                  * ordered correctly WRT the skb check above.
5184                  */
5185                 smp_rmb();
5186 
5187                 memcpy(&dpr->rx_jmb_buffers[di],
5188                        &spr->rx_jmb_buffers[si],
5189                        cpycnt * sizeof(struct ring_info));
5190 
5191                 for (i = 0; i < cpycnt; i++, di++, si++) {
5192                         struct tg3_rx_buffer_desc *sbd, *dbd;
5193                         sbd = &spr->rx_jmb[si].std;
5194                         dbd = &dpr->rx_jmb[di].std;
5195                         dbd->addr_hi = sbd->addr_hi;
5196                         dbd->addr_lo = sbd->addr_lo;
5197                 }
5198 
5199                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5200                                        tp->rx_jmb_ring_mask;
5201                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5202                                        tp->rx_jmb_ring_mask;
5203         }
5204 
5205         return err;
5206 }
5207 
5208 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5209 {
5210         struct tg3 *tp = tnapi->tp;
5211 
5212         /* run TX completion thread */
5213         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5214                 tg3_tx(tnapi);
5215                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5216                         return work_done;
5217         }
5218 
5219         /* run RX thread, within the bounds set by NAPI.
5220          * All RX "locking" is done by ensuring outside
5221          * code synchronizes with tg3->napi.poll()
5222          */
5223         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5224                 work_done += tg3_rx(tnapi, budget - work_done);
5225 
5226         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5227                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5228                 int i, err = 0;
5229                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5230                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5231 
5232                 for (i = 1; i < tp->irq_cnt; i++)
5233                         err |= tg3_rx_prodring_xfer(tp, dpr,
5234                                                     &tp->napi[i].prodring);
5235 
5236                 wmb();
5237 
5238                 if (std_prod_idx != dpr->rx_std_prod_idx)
5239                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5240                                      dpr->rx_std_prod_idx);
5241 
5242                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5243                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5244                                      dpr->rx_jmb_prod_idx);
5245 
5246                 mmiowb();
5247 
5248                 if (err)
5249                         tw32_f(HOSTCC_MODE, tp->coal_now);
5250         }
5251 
5252         return work_done;
5253 }
5254 
5255 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5256 {
5257         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5258         struct tg3 *tp = tnapi->tp;
5259         int work_done = 0;
5260         struct tg3_hw_status *sblk = tnapi->hw_status;
5261 
5262         while (1) {
5263                 work_done = tg3_poll_work(tnapi, work_done, budget);
5264 
5265                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5266                         goto tx_recovery;
5267 
5268                 if (unlikely(work_done >= budget))
5269                         break;
5270 
5271                 /* tnapi->last_tag is written to the interrupt mailbox
5272                  * below to tell the hw how much work has been processed,
5273                  * so we must read it before checking for more work.
5274                  */
5275                 tnapi->last_tag = sblk->status_tag;
5276                 tnapi->last_irq_tag = tnapi->last_tag;
5277                 rmb();
5278 
5279                 /* check for RX/TX work to do */
5280                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5281                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5282                         napi_complete(napi);
5283                         /* Re-enable interrupts; the tag write also tells the hw how much work has been processed. */
5284                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5285                         mmiowb();
5286                         break;
5287                 }
5288         }
5289 
5290         return work_done;
5291 
5292 tx_recovery:
5293         /* work_done is guaranteed to be less than budget. */
5294         napi_complete(napi);
5295         schedule_work(&tp->reset_task);
5296         return work_done;
5297 }
5298 
5299 static void tg3_process_error(struct tg3 *tp)
5300 {
5301         u32 val;
5302         bool real_error = false;
5303 
5304         if (tg3_flag(tp, ERROR_PROCESSED))
5305                 return;
5306 
5307         /* Check Flow Attention register */
5308         val = tr32(HOSTCC_FLOW_ATTN);
5309         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5310                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5311                 real_error = true;
5312         }
5313 
5314         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5315                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5316                 real_error = true;
5317         }
5318 
5319         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5320                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5321                 real_error = true;
5322         }
5323 
5324         if (!real_error)
5325                 return;
5326 
5327         tg3_dump_state(tp);
5328 
5329         tg3_flag_set(tp, ERROR_PROCESSED);
5330         schedule_work(&tp->reset_task);
5331 }
5332 
5333 static int tg3_poll(struct napi_struct *napi, int budget)
5334 {
5335         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5336         struct tg3 *tp = tnapi->tp;
5337         int work_done = 0;
5338         struct tg3_hw_status *sblk = tnapi->hw_status;
5339 
5340         while (1) {
5341                 if (sblk->status & SD_STATUS_ERROR)
5342                         tg3_process_error(tp);
5343 
5344                 tg3_poll_link(tp);
5345 
5346                 work_done = tg3_poll_work(tnapi, work_done, budget);
5347 
5348                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5349                         goto tx_recovery;
5350 
5351                 if (unlikely(work_done >= budget))
5352                         break;
5353 
5354                 if (tg3_flag(tp, TAGGED_STATUS)) {
5355                         /* tnapi->last_tag is used in tg3_int_reenable() below
5356                          * to tell the hw how much work has been processed,
5357                          * so we must read it before checking for more work.
5358                          */
5359                         tnapi->last_tag = sblk->status_tag;
5360                         tnapi->last_irq_tag = tnapi->last_tag;
5361                         rmb();
5362                 } else
5363                         sblk->status &= ~SD_STATUS_UPDATED;
5364 
5365                 if (likely(!tg3_has_work(tnapi))) {
5366                         napi_complete(napi);
5367                         tg3_int_reenable(tnapi);
5368                         break;
5369                 }
5370         }
5371 
5372         return work_done;
5373 
5374 tx_recovery:
5375         /* work_done is guaranteed to be less than budget. */
5376         napi_complete(napi);
5377         schedule_work(&tp->reset_task);
5378         return work_done;
5379 }
5380 
5381 static void tg3_napi_disable(struct tg3 *tp)
5382 {
5383         int i;
5384 
5385         for (i = tp->irq_cnt - 1; i >= 0; i--)
5386                 napi_disable(&tp->napi[i].napi);
5387 }
5388 
5389 static void tg3_napi_enable(struct tg3 *tp)
5390 {
5391         int i;
5392 
5393         for (i = 0; i < tp->irq_cnt; i++)
5394                 napi_enable(&tp->napi[i].napi);
5395 }
5396 
5397 static void tg3_napi_init(struct tg3 *tp)
5398 {
5399         int i;
5400 
5401         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5402         for (i = 1; i < tp->irq_cnt; i++)
5403                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5404 }
5405 
5406 static void tg3_napi_fini(struct tg3 *tp)
5407 {
5408         int i;
5409 
5410         for (i = 0; i < tp->irq_cnt; i++)
5411                 netif_napi_del(&tp->napi[i].napi);
5412 }
5413 
5414 static inline void tg3_netif_stop(struct tg3 *tp)
5415 {
5416         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5417         tg3_napi_disable(tp);
5418         netif_tx_disable(tp->dev);
5419 }
5420 
5421 static inline void tg3_netif_start(struct tg3 *tp)
5422 {
5423         /* NOTE: unconditional netif_tx_wake_all_queues is only
5424          * appropriate so long as all callers are assured to
5425          * have free tx slots (such as after tg3_init_hw)
5426          */
5427         netif_tx_wake_all_queues(tp->dev);
5428 
5429         tg3_napi_enable(tp);
5430         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5431         tg3_enable_ints(tp);
5432 }
5433 
5434 static void tg3_irq_quiesce(struct tg3 *tp)
5435 {
5436         int i;
5437 
5438         BUG_ON(tp->irq_sync);
5439 
5440         tp->irq_sync = 1;
5441         smp_mb();
5442 
5443         for (i = 0; i < tp->irq_cnt; i++)
5444                 synchronize_irq(tp->napi[i].irq_vec);
5445 }
5446 
5447 /* Fully shut down all tg3 driver activity elsewhere in the system.
5448  * If irq_sync is non-zero, the IRQ handlers are synchronized with as
5449  * well.  Most of the time this is only necessary when shutting down
5450  * the device.
5451  */
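     /* Typical usage elsewhere in this driver:
      *
      *      tg3_full_lock(tp, 1);
      *      ... quiesce, reconfigure and restart the chip ...
      *      tg3_full_unlock(tp);
      */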
5452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5453 {
5454         spin_lock_bh(&tp->lock);
5455         if (irq_sync)
5456                 tg3_irq_quiesce(tp);
5457 }
5458 
5459 static inline void tg3_full_unlock(struct tg3 *tp)
5460 {
5461         spin_unlock_bh(&tp->lock);
5462 }
5463 
5464 /* One-shot MSI handler - Chip automatically disables interrupt
5465  * after sending MSI so driver doesn't have to do it.
5466  */
5467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5468 {
5469         struct tg3_napi *tnapi = dev_id;
5470         struct tg3 *tp = tnapi->tp;
5471 
5472         prefetch(tnapi->hw_status);
5473         if (tnapi->rx_rcb)
5474                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5475 
5476         if (likely(!tg3_irq_sync(tp)))
5477                 napi_schedule(&tnapi->napi);
5478 
5479         return IRQ_HANDLED;
5480 }
5481 
5482 /* MSI ISR - No need to check for interrupt sharing and no need to
5483  * flush status block and interrupt mailbox. PCI ordering rules
5484  * guarantee that MSI will arrive after the status block.
5485  */
5486 static irqreturn_t tg3_msi(int irq, void *dev_id)
5487 {
5488         struct tg3_napi *tnapi = dev_id;
5489         struct tg3 *tp = tnapi->tp;
5490 
5491         prefetch(tnapi->hw_status);
5492         if (tnapi->rx_rcb)
5493                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5494         /*
5495          * Writing any value to intr-mbox-0 clears PCI INTA# and
5496          * chip-internal interrupt pending events.
5497          * Writing non-zero to intr-mbox-0 additionally tells the
5498          * NIC to stop sending us irqs, engaging "in-intr-handler"
5499          * event coalescing.
5500          */
5501         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5502         if (likely(!tg3_irq_sync(tp)))
5503                 napi_schedule(&tnapi->napi);
5504 
5505         return IRQ_RETVAL(1);
5506 }
5507 
5508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5509 {
5510         struct tg3_napi *tnapi = dev_id;
5511         struct tg3 *tp = tnapi->tp;
5512         struct tg3_hw_status *sblk = tnapi->hw_status;
5513         unsigned int handled = 1;
5514 
5515         /* In INTx mode, it is possible for the interrupt to arrive at
5516          * the CPU before the status block posted prior to it is visible.
5517          * Reading the PCI State register will confirm whether the
5518          * interrupt is ours and will flush the status block.
5519          */
5520         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5521                 if (tg3_flag(tp, CHIP_RESETTING) ||
5522                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5523                         handled = 0;
5524                         goto out;
5525                 }
5526         }
5527 
5528         /*
5529          * Writing any value to intr-mbox-0 clears PCI INTA# and
5530          * chip-internal interrupt pending events.
5531          * Writing non-zero to intr-mbox-0 additionally tells the
5532          * NIC to stop sending us irqs, engaging "in-intr-handler"
5533          * event coalescing.
5534          *
5535          * Flush the mailbox to de-assert the IRQ immediately to prevent
5536          * spurious interrupts.  The flush impacts performance but
5537          * excessive spurious interrupts can be worse in some cases.
5538          */
5539         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5540         if (tg3_irq_sync(tp))
5541                 goto out;
5542         sblk->status &= ~SD_STATUS_UPDATED;
5543         if (likely(tg3_has_work(tnapi))) {
5544                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5545                 napi_schedule(&tnapi->napi);
5546         } else {
5547                 /* No work, shared interrupt perhaps?  re-enable
5548                  * interrupts, and flush that PCI write
5549                  */
5550                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5551                                0x00000000);
5552         }
5553 out:
5554         return IRQ_RETVAL(handled);
5555 }
5556 
5557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5558 {
5559         struct tg3_napi *tnapi = dev_id;
5560         struct tg3 *tp = tnapi->tp;
5561         struct tg3_hw_status *sblk = tnapi->hw_status;
5562         unsigned int handled = 1;
5563 
5564         /* In INTx mode, it is possible for the interrupt to arrive at
5565          * the CPU before the status block posted just prior to it.
5566          * Reading the PCI State register will confirm whether the
5567          * interrupt is ours and will flush the status block.
5568          */
5569         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5570                 if (tg3_flag(tp, CHIP_RESETTING) ||
5571                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5572                         handled = 0;
5573                         goto out;
5574                 }
5575         }
5576 
5577         /*
5578          * Writing any value to intr-mbox-0 clears PCI INTA# and
5579          * chip-internal interrupt pending events.
5580          * Writing non-zero to intr-mbox-0 additionally tells the
5581          * NIC to stop sending us irqs, engaging "in-intr-handler"
5582          * event coalescing.
5583          *
5584          * Flush the mailbox to de-assert the IRQ immediately to prevent
5585          * spurious interrupts.  The flush impacts performance but
5586          * excessive spurious interrupts can be worse in some cases.
5587          */
5588         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5589 
5590         /*
5591          * In a shared interrupt configuration, sometimes other devices'
5592          * interrupts will scream.  We record the current status tag here
5593          * so that the above check can report that the screaming interrupts
5594          * are unhandled.  Eventually they will be silenced.
5595          */
5596         tnapi->last_irq_tag = sblk->status_tag;
5597 
5598         if (tg3_irq_sync(tp))
5599                 goto out;
5600 
5601         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5602 
5603         napi_schedule(&tnapi->napi);
5604 
5605 out:
5606         return IRQ_RETVAL(handled);
5607 }
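
/* Editor's note: the tag protocol above in a nutshell.  The NIC bumps
 * sblk->status_tag each time it posts a fresh status block, so:
 *
 *	status_tag == last_irq_tag  -> nothing new; likely another device
 *	                               screaming on the shared INTx line
 *	status_tag != last_irq_tag  -> real work; record the tag, ack the
 *	                               mailbox and schedule NAPI
 */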
5608 
5609 /* ISR for interrupt test */
5610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5611 {
5612         struct tg3_napi *tnapi = dev_id;
5613         struct tg3 *tp = tnapi->tp;
5614         struct tg3_hw_status *sblk = tnapi->hw_status;
5615 
5616         if ((sblk->status & SD_STATUS_UPDATED) ||
5617             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5618                 tg3_disable_ints(tp);
5619                 return IRQ_RETVAL(1);
5620         }
5621         return IRQ_RETVAL(0);
5622 }
5623 
5624 static int tg3_init_hw(struct tg3 *, int);
5625 static int tg3_halt(struct tg3 *, int, int);
5626 
5627 /* Restart hardware after configuration changes, self-test, etc.
5628  * Invoked with tp->lock held.
5629  */
5630 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5631         __releases(tp->lock)
5632         __acquires(tp->lock)
5633 {
5634         int err;
5635 
5636         err = tg3_init_hw(tp, reset_phy);
5637         if (err) {
5638                 netdev_err(tp->dev,
5639                            "Failed to re-initialize device, aborting\n");
5640                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5641                 tg3_full_unlock(tp);
5642                 del_timer_sync(&tp->timer);
5643                 tp->irq_sync = 0;
5644                 tg3_napi_enable(tp);
5645                 dev_close(tp->dev);
5646                 tg3_full_lock(tp, 0);
5647         }
5648         return err;
5649 }
5650 
5651 #ifdef CONFIG_NET_POLL_CONTROLLER
5652 static void tg3_poll_controller(struct net_device *dev)
5653 {
5654         int i;
5655         struct tg3 *tp = netdev_priv(dev);
5656 
5657         for (i = 0; i < tp->irq_cnt; i++)
5658                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5659 }
5660 #endif
5661 
5662 static void tg3_reset_task(struct work_struct *work)
5663 {
5664         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5665         int err;
5666         unsigned int restart_timer;
5667 
5668         tg3_full_lock(tp, 0);
5669 
5670         if (!netif_running(tp->dev)) {
5671                 tg3_full_unlock(tp);
5672                 return;
5673         }
5674 
5675         tg3_full_unlock(tp);
5676 
5677         tg3_phy_stop(tp);
5678 
5679         tg3_netif_stop(tp);
5680 
5681         tg3_full_lock(tp, 1);
5682 
5683         restart_timer = tg3_flag(tp, RESTART_TIMER);
5684         tg3_flag_clear(tp, RESTART_TIMER);
5685 
5686         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5687                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5688                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5689                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5690                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5691         }
5692 
5693         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5694         err = tg3_init_hw(tp, 1);
5695         if (err)
5696                 goto out;
5697 
5698         tg3_netif_start(tp);
5699 
5700         if (restart_timer)
5701                 mod_timer(&tp->timer, jiffies + 1);
5702 
5703 out:
5704         tg3_full_unlock(tp);
5705 
5706         if (!err)
5707                 tg3_phy_start(tp);
5708 }
5709 
5710 static void tg3_tx_timeout(struct net_device *dev)
5711 {
5712         struct tg3 *tp = netdev_priv(dev);
5713 
5714         if (netif_msg_tx_err(tp)) {
5715                 netdev_err(dev, "transmit timed out, resetting\n");
5716                 tg3_dump_state(tp);
5717         }
5718 
5719         schedule_work(&tp->reset_task);
5720 }
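
/* Editor's note: ndo_tx_timeout runs from the net core's watchdog timer
 * in atomic context, so the heavy lifting (halting and re-initializing
 * the chip, which sleeps) is deferred to tg3_reset_task() above via the
 * workqueue rather than done inline here.
 */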
5721 
5722 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5723 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5724 {
5725         u32 base = (u32) mapping & 0xffffffff;
5726 
5727         return (base > 0xffffdcc0) && (base + len + 8 < base);
5728 }
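
/* Editor's worked example for the test above (the u32 sum wraps):
 *
 *	mapping = 0xfffffff0, len = 0x20:
 *		base = 0xfffffff0 > 0xffffdcc0, and
 *		base + len + 8 = 0x18 after the wrap, which is < base
 *		=> the buffer straddles a 4GB line, return true
 *	mapping = 0x7ffffff0, len = 0x20:
 *		base + len + 8 = 0x80000018, no wrap => return false
 *
 * The first clause is a cheap pre-filter: only a base within ~9KB of a
 * 4GB line (0xffffffff - 0xffffdcc0 = 0x233f bytes) can wrap for the
 * buffer sizes this driver maps, presumably bounded by the largest
 * jumbo mapping.
 */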
5729 
5730 /* Test for DMA addresses > 40-bit */
5731 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5732                                           int len)
5733 {
5734 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5735         if (tg3_flag(tp, 40BIT_DMA_BUG))
5736                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5737         return 0;
5738 #else
5739         return 0;
5740 #endif
5741 }
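
/* Editor's worked example, with DMA_BIT_MASK(40) = 0xffffffffff:
 *
 *	mapping = 0xfffffffff0, len = 0x20:
 *		mapping + len = 0x10000000010 > mask => return true
 *
 * Per the #if above, the test is only compiled in when CONFIG_HIGHMEM
 * is set on a 64-bit build, the case where a dma_addr_t handed to
 * these chips can actually exceed 40 bits.
 */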
5742 
5743 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5744                         dma_addr_t mapping, int len, u32 flags,
5745                         u32 mss_and_is_end)
5746 {
5747         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5748         int is_end = (mss_and_is_end & 0x1);
5749         u32 mss = (mss_and_is_end >> 1);
5750         u32 vlan_tag = 0;
5751 
5752         if (is_end)
5753                 flags |= TXD_FLAG_END;
5754         if (flags & TXD_FLAG_VLAN) {
5755                 vlan_tag = flags >> 16;
5756                 flags &= 0xffff;
5757         }
5758         vlan_tag |= (mss << TXD_MSS_SHIFT);
5759 
5760         txd->addr_hi = ((u64) mapping >> 32);
5761         txd->addr_lo = ((u64) mapping & 0xffffffff);
5762         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5763         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5764 }
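
/* Editor's sketch of the packing convention used by callers of
 * tg3_set_txd().  Bit 0 of mss_and_is_end marks the final descriptor of
 * a packet and the remaining bits carry the MSS, e.g. in
 * tg3_start_xmit() below:
 *
 *	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *		    (i == last) | (mss << 1));
 *
 * For VLAN frames, bits 31:16 of flags hold the tag, which is merged
 * with the MSS into txd->vlan_tag.
 */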
5765 
5766 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5767                                 struct sk_buff *skb, int last)
5768 {
5769         int i;
5770         u32 entry = tnapi->tx_prod;
5771         struct ring_info *txb = &tnapi->tx_buffers[entry];
5772 
5773         pci_unmap_single(tnapi->tp->pdev,
5774                          dma_unmap_addr(txb, mapping),
5775                          skb_headlen(skb),
5776                          PCI_DMA_TODEVICE);
5777         for (i = 0; i < last; i++) {
5778                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 
5780                 entry = NEXT_TX(entry);
5781                 txb = &tnapi->tx_buffers[entry];
5782 
5783                 pci_unmap_page(tnapi->tp->pdev,
5784                                dma_unmap_addr(txb, mapping),
5785                                frag->size, PCI_DMA_TODEVICE);
5786         }
5787 }
5788 
5789 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5790 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5791                                        struct sk_buff *skb,
5792                                        u32 base_flags, u32 mss)
5793 {
5794         struct tg3 *tp = tnapi->tp;
5795         struct sk_buff *new_skb;
5796         dma_addr_t new_addr = 0;
5797         u32 entry = tnapi->tx_prod;
5798         int ret = 0;
5799 
5800         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5801                 new_skb = skb_copy(skb, GFP_ATOMIC);
5802         else {
5803                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5804 
5805                 new_skb = skb_copy_expand(skb,
5806                                           skb_headroom(skb) + more_headroom,
5807                                           skb_tailroom(skb), GFP_ATOMIC);
5808         }
5809 
5810         if (!new_skb) {
5811                 ret = -1;
5812         } else {
5813                 /* New SKB is guaranteed to be linear. */
5814                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5815                                           PCI_DMA_TODEVICE);
5816                 /* Make sure the mapping succeeded */
5817                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5818                         ret = -1;
5819                         dev_kfree_skb(new_skb);
5820 
5821                 /* Make sure new skb does not cross any 4G boundaries.
5822                  * Drop the packet if it does.
5823                  */
5824                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5825                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5826                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5827                                          PCI_DMA_TODEVICE);
5828                         ret = -1;
5829                         dev_kfree_skb(new_skb);
5830                 } else {
5831                         tnapi->tx_buffers[entry].skb = new_skb;
5832                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5833                                            mapping, new_addr);
5834 
5835                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5836                                     base_flags, 1 | (mss << 1));
5837                 }
5838         }
5839 
5840         dev_kfree_skb(skb);
5841 
5842         return ret;
5843 }
5844 
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846 
5847 /* Use GSO to work around a rare TSO bug that may be triggered when the
5848  * TSO header is greater than 80 bytes.
5849  */
5850 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5851 {
5852         struct sk_buff *segs, *nskb;
5853         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5854 
5855         /* Estimate the number of fragments in the worst case */
5856         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5857                 netif_stop_queue(tp->dev);
5858 
5859                 /* netif_tx_stop_queue() must be done before checking
5860                  * the tx index in tg3_tx_avail() below, because in
5861                  * tg3_tx(), we update the tx index before checking for
5862                  * netif_tx_queue_stopped().
5863                  */
5864                 smp_mb();
5865                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5866                         return NETDEV_TX_BUSY;
5867 
5868                 netif_wake_queue(tp->dev);
5869         }
5870 
5871         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5872         if (IS_ERR(segs))
5873                 goto tg3_tso_bug_end;
5874 
5875         do {
5876                 nskb = segs;
5877                 segs = segs->next;
5878                 nskb->next = NULL;
5879                 tg3_start_xmit(nskb, tp->dev);
5880         } while (segs);
5881 
5882 tg3_tso_bug_end:
5883         dev_kfree_skb(skb);
5884 
5885         return NETDEV_TX_OK;
5886 }
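
/* Editor's note on the estimate above: skb_gso_segment() expands one TSO
 * skb into gso_segs ordinary packets, each re-queued through
 * tg3_start_xmit() with its own descriptors, so a budget of roughly
 * three descriptors per segment (header plus a couple of data frags) is
 * reserved up front.  The stop-queue/smp_mb()/re-check dance mirrors the
 * flow control in tg3_start_xmit() so that a concurrent tg3_tx()
 * completion cannot be missed.
 */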
5887 
5888 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5889  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5890  */
5891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5892 {
5893         struct tg3 *tp = netdev_priv(dev);
5894         u32 len, entry, base_flags, mss;
5895         int i = -1, would_hit_hwbug;
5896         dma_addr_t mapping;
5897         struct tg3_napi *tnapi;
5898         struct netdev_queue *txq;
5899         unsigned int last;
5900 
5901         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5902         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5903         if (tg3_flag(tp, ENABLE_TSS))
5904                 tnapi++;
5905 
5906         /* We are running in a BH-disabled context with netif_tx_lock
5907          * held, and TX reclaim runs via tp->napi.poll inside of a software
5908          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5909          * no IRQ context deadlocks to worry about either.  Rejoice!
5910          */
5911         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5912                 if (!netif_tx_queue_stopped(txq)) {
5913                         netif_tx_stop_queue(txq);
5914 
5915                         /* This is a hard error, log it. */
5916                         netdev_err(dev,
5917                                    "BUG! Tx Ring full when queue awake!\n");
5918                 }
5919                 return NETDEV_TX_BUSY;
5920         }
5921 
5922         entry = tnapi->tx_prod;
5923         base_flags = 0;
5924         if (skb->ip_summed == CHECKSUM_PARTIAL)
5925                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5926 
5927         mss = skb_shinfo(skb)->gso_size;
5928         if (mss) {
5929                 struct iphdr *iph;
5930                 u32 tcp_opt_len, hdr_len;
5931 
5932                 if (skb_header_cloned(skb) &&
5933                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5934                         dev_kfree_skb(skb);
5935                         goto out_unlock;
5936                 }
5937 
5938                 iph = ip_hdr(skb);
5939                 tcp_opt_len = tcp_optlen(skb);
5940 
5941                 if (skb_is_gso_v6(skb)) {
5942                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5943                 } else {
5944                         u32 ip_tcp_len;
5945 
5946                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5947                         hdr_len = ip_tcp_len + tcp_opt_len;
5948 
5949                         iph->check = 0;
5950                         iph->tot_len = htons(mss + hdr_len);
5951                 }
5952 
5953                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5954                     tg3_flag(tp, TSO_BUG))
5955                         return tg3_tso_bug(tp, skb);
5956 
5957                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5958                                TXD_FLAG_CPU_POST_DMA);
5959 
5960                 if (tg3_flag(tp, HW_TSO_1) ||
5961                     tg3_flag(tp, HW_TSO_2) ||
5962                     tg3_flag(tp, HW_TSO_3)) {
5963                         tcp_hdr(skb)->check = 0;
5964                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5965                 } else
5966                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5967                                                                  iph->daddr, 0,
5968                                                                  IPPROTO_TCP,
5969                                                                  0);
5970 
5971                 if (tg3_flag(tp, HW_TSO_3)) {
5972                         mss |= (hdr_len & 0xc) << 12;
5973                         if (hdr_len & 0x10)
5974                                 base_flags |= 0x00000010;
5975                         base_flags |= (hdr_len & 0x3e0) << 5;
5976                 } else if (tg3_flag(tp, HW_TSO_2))
5977                         mss |= hdr_len << 9;
5978                 else if (tg3_flag(tp, HW_TSO_1) ||
5979                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5980                         if (tcp_opt_len || iph->ihl > 5) {
5981                                 int tsflags;
5982 
5983                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5984                                 mss |= (tsflags << 11);
5985                         }
5986                 } else {
5987                         if (tcp_opt_len || iph->ihl > 5) {
5988                                 int tsflags;
5989 
5990                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5991                                 base_flags |= tsflags << 12;
5992                         }
5993                 }
5994         }
5995 
5996         if (vlan_tx_tag_present(skb))
5997                 base_flags |= (TXD_FLAG_VLAN |
5998                                (vlan_tx_tag_get(skb) << 16));
5999 
6000         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6001             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6002                 base_flags |= TXD_FLAG_JMB_PKT;
6003 
6004         len = skb_headlen(skb);
6005 
6006         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6007         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6008                 dev_kfree_skb(skb);
6009                 goto out_unlock;
6010         }
6011 
6012         tnapi->tx_buffers[entry].skb = skb;
6013         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6014 
6015         would_hit_hwbug = 0;
6016 
6017         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6018                 would_hit_hwbug = 1;
6019 
6020         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6021             tg3_4g_overflow_test(mapping, len))
6022                 would_hit_hwbug = 1;
6023 
6024         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6025             tg3_40bit_overflow_test(tp, mapping, len))
6026                 would_hit_hwbug = 1;
6027 
6028         if (tg3_flag(tp, 5701_DMA_BUG))
6029                 would_hit_hwbug = 1;
6030 
6031         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6032                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6033 
6034         entry = NEXT_TX(entry);
6035 
6036         /* Now loop through additional data fragments, and queue them. */
6037         if (skb_shinfo(skb)->nr_frags > 0) {
6038                 last = skb_shinfo(skb)->nr_frags - 1;
6039                 for (i = 0; i <= last; i++) {
6040                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6041 
6042                         len = frag->size;
6043                         mapping = pci_map_page(tp->pdev,
6044                                                frag->page,
6045                                                frag->page_offset,
6046                                                len, PCI_DMA_TODEVICE);
6047 
6048                         tnapi->tx_buffers[entry].skb = NULL;
6049                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6050                                            mapping);
6051                         if (pci_dma_mapping_error(tp->pdev, mapping))
6052                                 goto dma_error;
6053 
6054                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6055                             len <= 8)
6056                                 would_hit_hwbug = 1;
6057 
6058                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6059                             tg3_4g_overflow_test(mapping, len))
6060                                 would_hit_hwbug = 1;
6061 
6062                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6063                             tg3_40bit_overflow_test(tp, mapping, len))
6064                                 would_hit_hwbug = 1;
6065 
6066                         if (tg3_flag(tp, HW_TSO_1) ||
6067                             tg3_flag(tp, HW_TSO_2) ||
6068                             tg3_flag(tp, HW_TSO_3))
6069                                 tg3_set_txd(tnapi, entry, mapping, len,
6070                                             base_flags, (i == last)|(mss << 1));
6071                         else
6072                                 tg3_set_txd(tnapi, entry, mapping, len,
6073                                             base_flags, (i == last));
6074 
6075                         entry = NEXT_TX(entry);
6076                 }
6077         }
6078 
6079         if (would_hit_hwbug) {
6080                 tg3_skb_error_unmap(tnapi, skb, i);
6081 
6082                 /* If the workaround fails due to memory/mapping
6083                  * failure, silently drop this packet.
6084                  */
6085                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6086                         goto out_unlock;
6087 
6088                 entry = NEXT_TX(tnapi->tx_prod);
6089         }
6090 
6091         /* Packets are ready; update the Tx producer idx on the card and locally. */
6092         tw32_tx_mbox(tnapi->prodmbox, entry);
6093 
6094         tnapi->tx_prod = entry;
6095         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6096                 netif_tx_stop_queue(txq);
6097 
6098                 /* netif_tx_stop_queue() must be done before checking
6099                  * the tx index in tg3_tx_avail() below, because in
6100                  * tg3_tx(), we update the tx index before checking for
6101                  * netif_tx_queue_stopped().
6102                  */
6103                 smp_mb();
6104                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6105                         netif_tx_wake_queue(txq);
6106         }
6107 
6108 out_unlock:
6109         mmiowb();
6110 
6111         return NETDEV_TX_OK;
6112 
6113 dma_error:
6114         tg3_skb_error_unmap(tnapi, skb, i);
6115         dev_kfree_skb(skb);
6116         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6117         return NETDEV_TX_OK;
6118 }
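
/* Editor's sketch of the lockless producer/consumer flow control used in
 * tg3_start_xmit() above.  The smp_mb() pairs with a barrier in the
 * tg3_tx() completion path; without it the producer could stop the queue
 * just after the consumer's final wakeup check and stall forever:
 *
 *	producer (here)			consumer (tg3_tx)
 *	-----------------		------------------
 *	stop queue			advance tx consumer index
 *	smp_mb()			smp_mb()
 *	recheck tg3_tx_avail();		if queue stopped and ring has
 *	wake queue if room appeared	room, wake the queue
 */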
6119 
6120 static void tg3_set_loopback(struct net_device *dev, u32 features)
6121 {
6122         struct tg3 *tp = netdev_priv(dev);
6123 
6124         if (features & NETIF_F_LOOPBACK) {
6125                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6126                         return;
6127 
6128                 /*
6129                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6130                  * loopback mode if Half-Duplex mode was negotiated earlier.
6131                  */
6132                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6133 
6134                 /* Enable internal MAC loopback mode */
6135                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6136                 spin_lock_bh(&tp->lock);
6137                 tw32(MAC_MODE, tp->mac_mode);
6138                 netif_carrier_on(tp->dev);
6139                 spin_unlock_bh(&tp->lock);
6140                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6141         } else {
6142                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6143                         return;
6144 
6145                 /* Disable internal MAC loopback mode */
6146                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6147                 spin_lock_bh(&tp->lock);
6148                 tw32(MAC_MODE, tp->mac_mode);
6149                 /* Force link status check */
6150                 tg3_setup_phy(tp, 1);
6151                 spin_unlock_bh(&tp->lock);
6152                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6153         }
6154 }
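
/* Editor's usage note: this path is driven by the netdev features
 * machinery; assuming ethtool exposes NETIF_F_LOOPBACK under the
 * feature string "loopback", toggling it from userspace looks like:
 *
 *	ethtool -K eth0 loopback on
 *
 * which ends up in tg3_set_features() below with the LOOPBACK bit
 * changed.
 */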
6155 
6156 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6157 {
6158         struct tg3 *tp = netdev_priv(dev);
6159 
6160         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6161                 features &= ~NETIF_F_ALL_TSO;
6162 
6163         return features;
6164 }
6165 
6166 static int tg3_set_features(struct net_device *dev, u32 features)
6167 {
6168         u32 changed = dev->features ^ features;
6169 
6170         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6171                 tg3_set_loopback(dev, features);
6172 
6173         return 0;
6174 }
6175 
6176 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6177                                int new_mtu)
6178 {
6179         dev->mtu = new_mtu;
6180 
6181         if (new_mtu > ETH_DATA_LEN) {
6182                 if (tg3_flag(tp, 5780_CLASS)) {
6183                         netdev_update_features(dev);
6184                         tg3_flag_clear(tp, TSO_CAPABLE);
6185                 } else {
6186                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6187                 }
6188         } else {
6189                 if (tg3_flag(tp, 5780_CLASS)) {
6190                         tg3_flag_set(tp, TSO_CAPABLE);
6191                         netdev_update_features(dev);
6192                 }
6193                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6194         }
6195 }
6196 
6197 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6198 {
6199         struct tg3 *tp = netdev_priv(dev);
6200         int err;
6201 
6202         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6203                 return -EINVAL;
6204 
6205         if (!netif_running(dev)) {
6206                 /* We'll just catch it later when the
6207                  * device is brought up.
6208                  */
6209                 tg3_set_mtu(dev, tp, new_mtu);
6210                 return 0;
6211         }
6212 
6213         tg3_phy_stop(tp);
6214 
6215         tg3_netif_stop(tp);
6216 
6217         tg3_full_lock(tp, 1);
6218 
6219         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6220 
6221         tg3_set_mtu(dev, tp, new_mtu);
6222 
6223         err = tg3_restart_hw(tp, 0);
6224 
6225         if (!err)
6226                 tg3_netif_start(tp);
6227 
6228         tg3_full_unlock(tp);
6229 
6230         if (!err)
6231                 tg3_phy_start(tp);
6232 
6233         return err;
6234 }
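
/* Editor's note: tg3_change_mtu() above shows the canonical shape of
 * every "reconfigure while running" path in this driver:
 *
 *	tg3_phy_stop() / tg3_netif_stop()   - quiesce PHY events and NAPI
 *	tg3_full_lock(tp, 1)                - lock and synchronize IRQs
 *	tg3_halt() ... tg3_restart_hw()     - reset and reprogram the chip
 *	tg3_netif_start() / tg3_full_unlock() / tg3_phy_start()
 *
 * tg3_reset_task() earlier in this section follows the same order.
 */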
6235 
6236 static void tg3_rx_prodring_free(struct tg3 *tp,
6237                                  struct tg3_rx_prodring_set *tpr)
6238 {
6239         int i;
6240 
6241         if (tpr != &tp->napi[0].prodring) {
6242                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6243                      i = (i + 1) & tp->rx_std_ring_mask)
6244                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6245                                         tp->rx_pkt_map_sz);
6246 
6247                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6248                         for (i = tpr->rx_jmb_cons_idx;
6249                              i != tpr->rx_jmb_prod_idx;
6250                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6251                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6252                                                 TG3_RX_JMB_MAP_SZ);
6253                         }
6254                 }
6255 
6256                 return;
6257         }
6258 
6259         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6260                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6261                                 tp->rx_pkt_map_sz);
6262 
6263         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6264                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6265                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6266                                         TG3_RX_JMB_MAP_SZ);
6267         }
6268 }
6269 
6270 /* Initialize rx rings for packet processing.
6271  *
6272  * The chip has been shut down and the driver detached from
6273  * the networking stack, so no interrupts or new tx packets will
6274  * end up in the driver.  tp->{tx,}lock are held and thus
6275  * we may not sleep.
6276  */
6277 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6278                                  struct tg3_rx_prodring_set *tpr)
6279 {
6280         u32 i, rx_pkt_dma_sz;
6281 
6282         tpr->rx_std_cons_idx = 0;
6283         tpr->rx_std_prod_idx = 0;
6284         tpr->rx_jmb_cons_idx = 0;
6285         tpr->rx_jmb_prod_idx = 0;
6286 
6287         if (tpr != &tp->napi[0].prodring) {
6288                 memset(&tpr->rx_std_buffers[0], 0,
6289                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6290                 if (tpr->rx_jmb_buffers)
6291                         memset(&tpr->rx_jmb_buffers[0], 0,
6292                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6293                 goto done;
6294         }
6295 
6296         /* Zero out all descriptors. */
6297         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6298 
6299         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6300         if (tg3_flag(tp, 5780_CLASS) &&
6301             tp->dev->mtu > ETH_DATA_LEN)
6302                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6303         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6304 
6305         /* Initialize invariants of the rings; we only set this
6306          * stuff once.  This works because the card does not
6307          * write into the rx buffer posting rings.
6308          */
6309         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6310                 struct tg3_rx_buffer_desc *rxd;
6311 
6312                 rxd = &tpr->rx_std[i];
6313                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6314                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6315                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6316                                (i << RXD_OPAQUE_INDEX_SHIFT));
6317         }
6318 
6319         /* Now allocate fresh SKBs for each rx ring. */
6320         for (i = 0; i < tp->rx_pending; i++) {
6321                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6322                         netdev_warn(tp->dev,
6323                                     "Using a smaller RX standard ring. Only "
6324                                     "%d out of %d buffers were allocated "
6325                                     "successfully\n", i, tp->rx_pending);
6326                         if (i == 0)
6327                                 goto initfail;
6328                         tp->rx_pending = i;
6329                         break;
6330                 }
6331         }
6332 
6333         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6334                 goto done;
6335 
6336         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6337 
6338         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6339                 goto done;
6340 
6341         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6342                 struct tg3_rx_buffer_desc *rxd;
6343 
6344                 rxd = &tpr->rx_jmb[i].std;
6345                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6346                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6347                                   RXD_FLAG_JUMBO;
6348                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6349                                (i << RXD_OPAQUE_INDEX_SHIFT));
6350         }
6351 
6352         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6353                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6354                         netdev_warn(tp->dev,
6355                                     "Using a smaller RX jumbo ring. Only %d "
6356                                     "out of %d buffers were allocated "
6357                                     "successfully\n", i, tp->rx_jumbo_pending);
6358                         if (i == 0)
6359                                 goto initfail;
6360                         tp->rx_jumbo_pending = i;
6361                         break;
6362                 }
6363         }
6364 
6365 done:
6366         return 0;
6367 
6368 initfail:
6369         tg3_rx_prodring_free(tp, tpr);
6370         return -ENOMEM;
6371 }
6372 
6373 static void tg3_rx_prodring_fini(struct tg3 *tp,
6374                                  struct tg3_rx_prodring_set *tpr)
6375 {
6376         kfree(tpr->rx_std_buffers);
6377         tpr->rx_std_buffers = NULL;
6378         kfree(tpr->rx_jmb_buffers);
6379         tpr->rx_jmb_buffers = NULL;
6380         if (tpr->rx_std) {
6381                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6382                                   tpr->rx_std, tpr->rx_std_mapping);
6383                 tpr->rx_std = NULL;
6384         }
6385         if (tpr->rx_jmb) {
6386                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6387                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6388                 tpr->rx_jmb = NULL;
6389         }
6390 }
6391 
6392 static int tg3_rx_prodring_init(struct tg3 *tp,
6393                                 struct tg3_rx_prodring_set *tpr)
6394 {
6395         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6396                                       GFP_KERNEL);
6397         if (!tpr->rx_std_buffers)
6398                 return -ENOMEM;
6399 
6400         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6401                                          TG3_RX_STD_RING_BYTES(tp),
6402                                          &tpr->rx_std_mapping,
6403                                          GFP_KERNEL);
6404         if (!tpr->rx_std)
6405                 goto err_out;
6406 
6407         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6408                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6409                                               GFP_KERNEL);
6410                 if (!tpr->rx_jmb_buffers)
6411                         goto err_out;
6412 
6413                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6414                                                  TG3_RX_JMB_RING_BYTES(tp),
6415                                                  &tpr->rx_jmb_mapping,
6416                                                  GFP_KERNEL);
6417                 if (!tpr->rx_jmb)
6418                         goto err_out;
6419         }
6420 
6421         return 0;
6422 
6423 err_out:
6424         tg3_rx_prodring_fini(tp, tpr);
6425         return -ENOMEM;
6426 }
6427 
6428 /* Free up pending packets in all rx/tx rings.
6429  *
6430  * The chip has been shut down and the driver detached from
6431  * the networking stack, so no interrupts or new tx packets will
6432  * end up in the driver.  tp->{tx,}lock is not held and we are not
6433  * in an interrupt context and thus may sleep.
6434  */
6435 static void tg3_free_rings(struct tg3 *tp)
6436 {
6437         int i, j;
6438 
6439         for (j = 0; j < tp->irq_cnt; j++) {
6440                 struct tg3_napi *tnapi = &tp->napi[j];
6441 
6442                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6443 
6444                 if (!tnapi->tx_buffers)
6445                         continue;
6446 
6447                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6448                         struct ring_info *txp;
6449                         struct sk_buff *skb;
6450                         unsigned int k;
6451 
6452                         txp = &tnapi->tx_buffers[i];
6453                         skb = txp->skb;
6454 
6455                         if (skb == NULL) {
6456                                 i++;
6457                                 continue;
6458                         }
6459 
6460                         pci_unmap_single(tp->pdev,
6461                                          dma_unmap_addr(txp, mapping),
6462                                          skb_headlen(skb),
6463                                          PCI_DMA_TODEVICE);
6464                         txp->skb = NULL;
6465 
6466                         i++;
6467 
6468                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6469                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6470                                 pci_unmap_page(tp->pdev,
6471                                                dma_unmap_addr(txp, mapping),
6472                                                skb_shinfo(skb)->frags[k].size,
6473                                                PCI_DMA_TODEVICE);
6474                                 i++;
6475                         }
6476 
6477                         dev_kfree_skb_any(skb);
6478                 }
6479         }
6480 }
6481 
6482 /* Initialize tx/rx rings for packet processing.
6483  *
6484  * The chip has been shut down and the driver detached from
6485  * the networking stack, so no interrupts or new tx packets will
6486  * end up in the driver.  tp->{tx,}lock are held and thus
6487  * we may not sleep.
6488  */
6489 static int tg3_init_rings(struct tg3 *tp)
6490 {
6491         int i;
6492 
6493         /* Free up all the SKBs. */
6494         tg3_free_rings(tp);
6495 
6496         for (i = 0; i < tp->irq_cnt; i++) {
6497                 struct tg3_napi *tnapi = &tp->napi[i];
6498 
6499                 tnapi->last_tag = 0;
6500                 tnapi->last_irq_tag = 0;
6501                 tnapi->hw_status->status = 0;
6502                 tnapi->hw_status->status_tag = 0;
6503                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6504 
6505                 tnapi->tx_prod = 0;
6506                 tnapi->tx_cons = 0;
6507                 if (tnapi->tx_ring)
6508                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6509 
6510                 tnapi->rx_rcb_ptr = 0;
6511                 if (tnapi->rx_rcb)
6512                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6513 
6514                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6515                         tg3_free_rings(tp);
6516                         return -ENOMEM;
6517                 }
6518         }
6519 
6520         return 0;
6521 }
6522 
6523 /*
6524  * Must not be invoked with interrupt sources disabled and
6525  * the hardware shut down.
6526  */
6527 static void tg3_free_consistent(struct tg3 *tp)
6528 {
6529         int i;
6530 
6531         for (i = 0; i < tp->irq_cnt; i++) {
6532                 struct tg3_napi *tnapi = &tp->napi[i];
6533 
6534                 if (tnapi->tx_ring) {
6535                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6536                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6537                         tnapi->tx_ring = NULL;
6538                 }
6539 
6540                 kfree(tnapi->tx_buffers);
6541                 tnapi->tx_buffers = NULL;
6542 
6543                 if (tnapi->rx_rcb) {
6544                         dma_free_coherent(&tp->pdev->dev,
6545                                           TG3_RX_RCB_RING_BYTES(tp),
6546                                           tnapi->rx_rcb,
6547                                           tnapi->rx_rcb_mapping);
6548                         tnapi->rx_rcb = NULL;
6549                 }
6550 
6551                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6552 
6553                 if (tnapi->hw_status) {
6554                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6555                                           tnapi->hw_status,
6556                                           tnapi->status_mapping);
6557                         tnapi->hw_status = NULL;
6558                 }
6559         }
6560 
6561         if (tp->hw_stats) {
6562                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6563                                   tp->hw_stats, tp->stats_mapping);
6564                 tp->hw_stats = NULL;
6565         }
6566 }
6567 
6568 /*
6569  * Must not be invoked with interrupt sources disabled and
6570  * the hardware shut down.  Can sleep.
6571  */
6572 static int tg3_alloc_consistent(struct tg3 *tp)
6573 {
6574         int i;
6575 
6576         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6577                                           sizeof(struct tg3_hw_stats),
6578                                           &tp->stats_mapping,
6579                                           GFP_KERNEL);
6580         if (!tp->hw_stats)
6581                 goto err_out;
6582 
6583         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6584 
6585         for (i = 0; i < tp->irq_cnt; i++) {
6586                 struct tg3_napi *tnapi = &tp->napi[i];
6587                 struct tg3_hw_status *sblk;
6588 
6589                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6590                                                       TG3_HW_STATUS_SIZE,
6591                                                       &tnapi->status_mapping,
6592                                                       GFP_KERNEL);
6593                 if (!tnapi->hw_status)
6594                         goto err_out;
6595 
6596                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6597                 sblk = tnapi->hw_status;
6598 
6599                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6600                         goto err_out;
6601 
6602                 /* If multivector TSS is enabled, vector 0 does not handle
6603                  * tx interrupts.  Don't allocate any resources for it.
6604                  */
6605                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6606                     (i && tg3_flag(tp, ENABLE_TSS))) {
6607                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6608                                                     TG3_TX_RING_SIZE,
6609                                                     GFP_KERNEL);
6610                         if (!tnapi->tx_buffers)
6611                                 goto err_out;
6612 
6613                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6614                                                             TG3_TX_RING_BYTES,
6615                                                         &tnapi->tx_desc_mapping,
6616                                                             GFP_KERNEL);
6617                         if (!tnapi->tx_ring)
6618                                 goto err_out;
6619                 }
6620 
6621                 /*
6622                  * When RSS is enabled, the status block format changes
6623                  * slightly.  The "rx_jumbo_consumer", "reserved",
6624                  * and "rx_mini_consumer" members get mapped to the
6625                  * other three rx return ring producer indexes.
6626                  */
6627                 switch (i) {
6628                 default:
6629                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6630                         break;
6631                 case 2:
6632                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6633                         break;
6634                 case 3:
6635                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6636                         break;
6637                 case 4:
6638                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6639                         break;
6640                 }
6641 
6642                 /*
6643                  * If multivector RSS is enabled, vector 0 does not handle
6644                  * rx or tx interrupts.  Don't allocate any resources for it.
6645                  */
6646                 if (!i && tg3_flag(tp, ENABLE_RSS))
6647                         continue;
6648 
6649                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6650                                                    TG3_RX_RCB_RING_BYTES(tp),
6651                                                    &tnapi->rx_rcb_mapping,
6652                                                    GFP_KERNEL);
6653                 if (!tnapi->rx_rcb)
6654                         goto err_out;
6655 
6656                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6657         }
6658 
6659         return 0;
6660 
6661 err_out:
6662         tg3_free_consistent(tp);
6663         return -ENOMEM;
6664 }
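
/* Editor's sketch of the DMA idiom used throughout the two functions
 * above: dma_alloc_coherent() returns a kernel virtual address and
 * stores the matching bus address in a dma_addr_t, and every allocation
 * is released by tg3_free_consistent() with the identical size.  A
 * minimal, self-contained illustration (hypothetical helper):
 */
#if 0	/* illustration only */
static int example_coherent_alloc(struct tg3 *tp)
{
	void *buf;
	dma_addr_t map;

	buf = dma_alloc_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
				 &map, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... hand "map" to the hardware, use "buf" from the CPU ... */
	dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, buf, map);
	return 0;
}
#endif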
6665 
6666 #define MAX_WAIT_CNT 1000
6667 
6668 /* To stop a block, clear the enable bit and poll until it
6669  * clears.  tp->lock is held.
6670  */
6671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6672 {
6673         unsigned int i;
6674         u32 val;
6675 
6676         if (tg3_flag(tp, 5705_PLUS)) {
6677                 switch (ofs) {
6678                 case RCVLSC_MODE:
6679                 case DMAC_MODE:
6680                 case MBFREE_MODE:
6681                 case BUFMGR_MODE:
6682                 case MEMARB_MODE:
6683                         /* We can't enable/disable these bits on the
6684                          * 5705/5750, so just report success.
6685                          */
6686                         return 0;
6687 
6688                 default:
6689                         break;
6690                 }
6691         }
6692 
6693         val = tr32(ofs);
6694         val &= ~enable_bit;
6695         tw32_f(ofs, val);
6696 
6697         for (i = 0; i < MAX_WAIT_CNT; i++) {
6698                 udelay(100);
6699                 val = tr32(ofs);
6700                 if ((val & enable_bit) == 0)
6701                         break;
6702         }
6703 
6704         if (i == MAX_WAIT_CNT && !silent) {
6705                 dev_err(&tp->pdev->dev,
6706                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6707                         ofs, enable_bit);
6708                 return -ENODEV;
6709         }
6710 
6711         return 0;
6712 }
6713 
6714 /* tp->lock is held. */
6715 static int tg3_abort_hw(struct tg3 *tp, int silent)
6716 {
6717         int i, err;
6718 
6719         tg3_disable_ints(tp);
6720 
6721         tp->rx_mode &= ~RX_MODE_ENABLE;
6722         tw32_f(MAC_RX_MODE, tp->rx_mode);
6723         udelay(10);
6724 
6725         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6726         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6727         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6728         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6729         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6730         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6731 
6732         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6733         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6734         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6735         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6736         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6737         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6738         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6739 
6740         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6741         tw32_f(MAC_MODE, tp->mac_mode);
6742         udelay(40);
6743 
6744         tp->tx_mode &= ~TX_MODE_ENABLE;
6745         tw32_f(MAC_TX_MODE, tp->tx_mode);
6746 
6747         for (i = 0; i < MAX_WAIT_CNT; i++) {
6748                 udelay(100);
6749                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6750                         break;
6751         }
6752         if (i >= MAX_WAIT_CNT) {
6753                 dev_err(&tp->pdev->dev,
6754                         "%s timed out, TX_MODE_ENABLE will not clear "
6755                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6756                 err |= -ENODEV;
6757         }
6758 
6759         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6760         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6761         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6762 
6763         tw32(FTQ_RESET, 0xffffffff);
6764         tw32(FTQ_RESET, 0x00000000);
6765 
6766         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6767         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6768 
6769         for (i = 0; i < tp->irq_cnt; i++) {
6770                 struct tg3_napi *tnapi = &tp->napi[i];
6771                 if (tnapi->hw_status)
6772                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6773         }
6774         if (tp->hw_stats)
6775                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6776 
6777         return err;
6778 }
6779 
6780 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6781 {
6782         int i;
6783         u32 apedata;
6784 
6785         /* NCSI does not support APE events */
6786         if (tg3_flag(tp, APE_HAS_NCSI))
6787                 return;
6788 
6789         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6790         if (apedata != APE_SEG_SIG_MAGIC)
6791                 return;
6792 
6793         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6794         if (!(apedata & APE_FW_STATUS_READY))
6795                 return;
6796 
6797         /* Wait for up to 1 millisecond for APE to service previous event. */
6798         for (i = 0; i < 10; i++) {
6799                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6800                         return;
6801 
6802                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6803 
6804                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6805                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6806                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6807 
6808                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6809 
6810                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6811                         break;
6812 
6813                 udelay(100);
6814         }
6815 
6816         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6817                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6818 }
6819 
6820 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6821 {
6822         u32 event;
6823         u32 apedata;
6824 
6825         if (!tg3_flag(tp, ENABLE_APE))
6826                 return;
6827 
6828         switch (kind) {
6829         case RESET_KIND_INIT:
6830                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6831                                 APE_HOST_SEG_SIG_MAGIC);
6832                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6833                                 APE_HOST_SEG_LEN_MAGIC);
6834                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6835                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6836                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6837                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6838                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6839                                 APE_HOST_BEHAV_NO_PHYLOCK);
6840                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6841                                     TG3_APE_HOST_DRVR_STATE_START);
6842 
6843                 event = APE_EVENT_STATUS_STATE_START;
6844                 break;
6845         case RESET_KIND_SHUTDOWN:
6846                 /* With the interface we are currently using,
6847                  * APE does not track driver state.  Wiping
6848                  * out the HOST SEGMENT SIGNATURE forces
6849                  * the APE to assume OS absent status.
6850                  */
6851                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6852 
6853                 if (device_may_wakeup(&tp->pdev->dev) &&
6854                     tg3_flag(tp, WOL_ENABLE)) {
6855                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6856                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6857                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6858                 } else
6859                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6860 
6861                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6862 
6863                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6864                 break;
6865         case RESET_KIND_SUSPEND:
6866                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6867                 break;
6868         default:
6869                 return;
6870         }
6871 
6872         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6873 
6874         tg3_ape_send_event(tp, event);
6875 }
6876 
6877 /* tp->lock is held. */
6878 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6879 {
6880         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6881                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6882 
6883         if (tg3_flag(