
Linux/drivers/staging/et131x/et131x.c

  1 /* Agere Systems Inc.
  2  * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
  3  *
  4  * Copyright © 2005 Agere Systems Inc.
  5  * All rights reserved.
  6  *   http://www.agere.com
  7  *
  8  * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
  9  *
 10  *------------------------------------------------------------------------------
 11  *
 12  * SOFTWARE LICENSE
 13  *
 14  * This software is provided subject to the following terms and conditions,
 15  * which you should read carefully before using the software.  Using this
 16  * software indicates your acceptance of these terms and conditions.  If you do
 17  * not agree with these terms and conditions, do not use the software.
 18  *
 19  * Copyright © 2005 Agere Systems Inc.
 20  * All rights reserved.
 21  *
 22  * Redistribution and use in source or binary forms, with or without
 23  * modifications, are permitted provided that the following conditions are met:
 24  *
 25  * . Redistributions of source code must retain the above copyright notice, this
 26  *    list of conditions and the following Disclaimer as comments in the code as
 27  *    well as in the documentation and/or other materials provided with the
 28  *    distribution.
 29  *
 30  * . Redistributions in binary form must reproduce the above copyright notice,
 31  *    this list of conditions and the following Disclaimer in the documentation
 32  *    and/or other materials provided with the distribution.
 33  *
 34  * . Neither the name of Agere Systems Inc. nor the names of the contributors
 35  *    may be used to endorse or promote products derived from this software
 36  *    without specific prior written permission.
 37  *
 38  * Disclaimer
 39  *
 40  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 41  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 42  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 43  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 44  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 45  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 46  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 47  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 48  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 50  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 51  * DAMAGE.
 52  */
 53 
 54 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 55 
 56 #include <linux/pci.h>
 57 #include <linux/module.h>
 58 #include <linux/types.h>
 59 #include <linux/kernel.h>
 60 
 61 #include <linux/sched.h>
 62 #include <linux/ptrace.h>
 63 #include <linux/slab.h>
 64 #include <linux/ctype.h>
 65 #include <linux/string.h>
 66 #include <linux/timer.h>
 67 #include <linux/interrupt.h>
 68 #include <linux/in.h>
 69 #include <linux/delay.h>
 70 #include <linux/bitops.h>
 71 #include <linux/io.h>
 72 
 73 #include <linux/netdevice.h>
 74 #include <linux/etherdevice.h>
 75 #include <linux/skbuff.h>
 76 #include <linux/if_arp.h>
 77 #include <linux/ioport.h>
 78 #include <linux/crc32.h>
 79 #include <linux/random.h>
 80 #include <linux/phy.h>
 81 
 82 #include "et131x.h"
 83 
 84 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
 85 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
 86 MODULE_LICENSE("Dual BSD/GPL");
 87 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
 88 
 89 /* EEPROM defines */
 90 #define MAX_NUM_REGISTER_POLLS          1000
 91 #define MAX_NUM_WRITE_RETRIES           2
 92 
 93 /* MAC defines */
 94 #define COUNTER_WRAP_16_BIT 0x10000
 95 #define COUNTER_WRAP_12_BIT 0x1000
 96 
 97 /* PCI defines */
 98 #define INTERNAL_MEM_SIZE       0x400   /* 1024 of internal memory */
 99 #define INTERNAL_MEM_RX_OFFSET  0x1FF   /* 50%   Tx, 50%   Rx */
100 
101 /* ISR defines */
102 /* For interrupts, normal running is:
103  *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
104  *       watchdog_interrupt & txdma_xfer_done
105  *
 106  * In both cases, when flow control is enabled for Tx only or bi-directionally,
 107  * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
108  * buffer rings are running low.
109  */
110 #define INT_MASK_DISABLE            0xffffffff
111 
112 /* NOTE: Masking out MAC_STAT Interrupt for now...
113  * #define INT_MASK_ENABLE             0xfff6bf17
114  * #define INT_MASK_ENABLE_NO_FLOW     0xfff6bfd7
115  */
116 #define INT_MASK_ENABLE             0xfffebf17
117 #define INT_MASK_ENABLE_NO_FLOW     0xfffebfd7
118 
119 /* General defines */
120 /* Packet and header sizes */
121 #define NIC_MIN_PACKET_SIZE     60
122 
123 /* Multicast list size */
124 #define NIC_MAX_MCAST_LIST      128
125 
126 /* Supported Filters */
127 #define ET131X_PACKET_TYPE_DIRECTED             0x0001
128 #define ET131X_PACKET_TYPE_MULTICAST            0x0002
129 #define ET131X_PACKET_TYPE_BROADCAST            0x0004
130 #define ET131X_PACKET_TYPE_PROMISCUOUS          0x0008
131 #define ET131X_PACKET_TYPE_ALL_MULTICAST        0x0010
132 
133 /* Tx Timeout */
134 #define ET131X_TX_TIMEOUT       (1 * HZ)
135 #define NIC_SEND_HANG_THRESHOLD 0
136 
137 /* MP_TCB flags */
138 #define FMP_DEST_MULTI                  0x00000001
139 #define FMP_DEST_BROAD                  0x00000002
140 
141 /* MP_ADAPTER flags */
142 #define FMP_ADAPTER_INTERRUPT_IN_USE    0x00000008
143 
144 /* MP_SHARED flags */
145 #define FMP_ADAPTER_LOWER_POWER         0x00200000
146 
147 #define FMP_ADAPTER_NON_RECOVER_ERROR   0x00800000
148 #define FMP_ADAPTER_HARDWARE_ERROR      0x04000000
149 
150 #define FMP_ADAPTER_FAIL_SEND_MASK      0x3ff00000
151 
152 /* Some offsets in PCI config space that are actually used. */
153 #define ET1310_PCI_MAC_ADDRESS          0xA4
154 #define ET1310_PCI_EEPROM_STATUS        0xB2
155 #define ET1310_PCI_ACK_NACK             0xC0
156 #define ET1310_PCI_REPLAY               0xC2
157 #define ET1310_PCI_L0L1LATENCY          0xCF
158 
159 /* PCI Product IDs */
160 #define ET131X_PCI_DEVICE_ID_GIG        0xED00  /* ET1310 1000 Base-T 8 */
161 #define ET131X_PCI_DEVICE_ID_FAST       0xED01  /* ET1310 100  Base-T */
162 
163 /* Define order of magnitude converter */
164 #define NANO_IN_A_MICRO 1000
165 
166 #define PARM_RX_NUM_BUFS_DEF    4
167 #define PARM_RX_TIME_INT_DEF    10
168 #define PARM_RX_MEM_END_DEF     0x2bc
169 #define PARM_TX_TIME_INT_DEF    40
170 #define PARM_TX_NUM_BUFS_DEF    4
171 #define PARM_DMA_CACHE_DEF      0
172 
173 /* RX defines */
174 #define FBR_CHUNKS              32
175 #define MAX_DESC_PER_RING_RX    1024
176 
177 /* number of RFDs - default and min */
178 #define RFD_LOW_WATER_MARK      40
179 #define NIC_DEFAULT_NUM_RFD     1024
180 #define NUM_FBRS                2
181 
182 #define NUM_PACKETS_HANDLED     256
183 
184 #define ALCATEL_MULTICAST_PKT   0x01000000
185 #define ALCATEL_BROADCAST_PKT   0x02000000
186 
187 /* typedefs for Free Buffer Descriptors */
188 struct fbr_desc {
189         u32 addr_lo;
190         u32 addr_hi;
191         u32 word2;              /* Bits 10-31 reserved, 0-9 descriptor */
192 };
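     /* Illustrative sketch (not part of the original driver): a free buffer
      * descriptor is typically filled with the DMA address of the receive
      * buffer it describes, plus the buffer's ring index in the low 10 bits
      * of word2, e.g.:
      *
      *     desc->addr_hi = upper_32_bits(buf_dma);
      *     desc->addr_lo = lower_32_bits(buf_dma);
      *     desc->word2   = buf_index;      (bits 0-9; 10-31 reserved)
      */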
193 
194 /* Packet Status Ring Descriptors
195  *
196  * Word 0:
197  *
198  * top 16 bits are from the Alcatel Status Word as enumerated in
199  * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
200  *
201  * 0: hp                        hash pass
202  * 1: ipa                       IP checksum assist
203  * 2: ipp                       IP checksum pass
204  * 3: tcpa                      TCP checksum assist
205  * 4: tcpp                      TCP checksum pass
206  * 5: wol                       WOL Event
207  * 6: rxmac_error               RXMAC Error Indicator
208  * 7: drop                      Drop packet
209  * 8: ft                        Frame Truncated
210  * 9: jp                        Jumbo Packet
211  * 10: vp                       VLAN Packet
212  * 11-15: unused
213  * 16: asw_prev_pkt_dropped     e.g. IFG too small on previous
214  * 17: asw_RX_DV_event          short receive event detected
215  * 18: asw_false_carrier_event  bad carrier since last good packet
216  * 19: asw_code_err             one or more nibbles signalled as errors
217  * 20: asw_CRC_err              CRC error
218  * 21: asw_len_chk_err          frame length field incorrect
219  * 22: asw_too_long             frame length > 1518 bytes
220  * 23: asw_OK                   valid CRC + no code error
221  * 24: asw_multicast            has a multicast address
222  * 25: asw_broadcast            has a broadcast address
223  * 26: asw_dribble_nibble       spurious bits after EOP
224  * 27: asw_control_frame        is a control frame
225  * 28: asw_pause_frame          is a pause frame
226  * 29: asw_unsupported_op       unsupported OP code
227  * 30: asw_VLAN_tag             VLAN tag detected
228  * 31: asw_long_evt             Rx long event
229  *
230  * Word 1:
231  * 0-15: length                 length in bytes
232  * 16-25: bi                    Buffer Index
233  * 26-27: ri                    Ring Index
234  * 28-31: reserved
235  */
236 
237 struct pkt_stat_desc {
238         u32 word0;
239         u32 word1;
240 };
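     /* Illustrative sketch (not part of the original driver): with the word1
      * layout documented above, a consumer of the packet status ring could
      * unpack the fields as:
      *
      *     len    = psr->word1 & 0xFFFF;          (bits 0-15, frame length)
      *     bindex = (psr->word1 >> 16) & 0x3FF;   (bits 16-25, buffer index)
      *     rindex = (psr->word1 >> 26) & 0x3;     (bits 26-27, ring index)
      */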
241 
242 /* Typedefs for the RX DMA status word */
243 
244 /* rx status word 0 holds part of the status bits of the Rx DMA engine
245  * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
246  * which contains the Free Buffer ring 0 and 1 available offset.
247  *
248  * bit 0-9 FBR1 offset
249  * bit 10 Wrap flag for FBR1
250  * bit 16-25 FBR0 offset
251  * bit 26 Wrap flag for FBR0
252  */
253 
254 /* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
255  * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
256  * which contains the Packet Status Ring available offset.
257  *
258  * bit 0-15 reserved
259  * bit 16-27 PSRoffset
260  * bit 28 PSRwrap
261  * bit 29-31 unused
262  */
263 
264 /* struct rx_status_block is a structure representing the status of the Rx
 265  * DMA engine.  It sits in free memory, and is pointed to by 0x101c / 0x1020
266  */
267 struct rx_status_block {
268         u32 word0;
269         u32 word1;
270 };
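     /* Illustrative sketch (not part of the original driver): using the word0
      * and word1 layouts described above, the write-back words could be
      * decoded as:
      *
      *     fbr1_offset = status->word0 & 0x3FF;           (bits 0-9)
      *     fbr1_wrap   = (status->word0 >> 10) & 1;       (bit 10)
      *     fbr0_offset = (status->word0 >> 16) & 0x3FF;   (bits 16-25)
      *     fbr0_wrap   = (status->word0 >> 26) & 1;       (bit 26)
      *     psr_offset  = (status->word1 >> 16) & 0xFFF;   (bits 16-27)
      *     psr_wrap    = (status->word1 >> 28) & 1;       (bit 28)
      */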
271 
272 /* Structure for look-up table holding free buffer ring pointers, addresses
273  * and state.
274  */
275 struct fbr_lookup {
276         void            *virt[MAX_DESC_PER_RING_RX];
277         u32              bus_high[MAX_DESC_PER_RING_RX];
278         u32              bus_low[MAX_DESC_PER_RING_RX];
279         void            *ring_virtaddr;
280         dma_addr_t       ring_physaddr;
281         void            *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
282         dma_addr_t       mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
283         u32              local_full;
284         u32              num_entries;
285         dma_addr_t       buffsize;
286 };
287 
 288 /* struct rx_ring is the structure representing the adapter's local
289  * reference(s) to the rings
290  */
291 struct rx_ring {
292         struct fbr_lookup *fbr[NUM_FBRS];
293         void *ps_ring_virtaddr;
294         dma_addr_t ps_ring_physaddr;
295         u32 local_psr_full;
296         u32 psr_num_entries;
297 
298         struct rx_status_block *rx_status_block;
299         dma_addr_t rx_status_bus;
300 
301         /* RECV */
302         struct list_head recv_list;
303         u32 num_ready_recv;
304 
305         u32 num_rfd;
306 
307         bool unfinished_receives;
308 };
309 
310 /* TX defines */
311 /* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
312  *
313  * 0-15: length of packet
314  * 16-27: VLAN tag
315  * 28: VLAN CFI
316  * 29-31: VLAN priority
317  *
318  * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
319  *
320  * 0: last packet in the sequence
321  * 1: first packet in the sequence
322  * 2: interrupt the processor when this pkt sent
323  * 3: Control word - no packet data
324  * 4: Issue half-duplex backpressure : XON/XOFF
325  * 5: send pause frame
326  * 6: Tx frame has error
327  * 7: append CRC
328  * 8: MAC override
329  * 9: pad packet
330  * 10: Packet is a Huge packet
331  * 11: append VLAN tag
332  * 12: IP checksum assist
333  * 13: TCP checksum assist
334  * 14: UDP checksum assist
335  */
336 
337 #define TXDESC_FLAG_LASTPKT             0x0001
338 #define TXDESC_FLAG_FIRSTPKT            0x0002
339 #define TXDESC_FLAG_INTPROC             0x0004
340 
341 /* struct tx_desc represents each descriptor on the ring */
342 struct tx_desc {
343         u32 addr_hi;
344         u32 addr_lo;
345         u32 len_vlan;   /* control words how to xmit the */
346         u32 flags;      /* data (detailed above) */
347 };
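     /* Illustrative sketch (not part of the original driver): for a single
      * fragment frame, len_vlan and flags would be built from the word 2 /
      * word 3 layout documented above, e.g.:
      *
      *     desc->addr_hi  = upper_32_bits(frag_dma);
      *     desc->addr_lo  = lower_32_bits(frag_dma);
      *     desc->len_vlan = skb->len & 0xFFFF;    (length in bits 0-15)
      *     desc->flags    = TXDESC_FLAG_FIRSTPKT | TXDESC_FLAG_LASTPKT |
      *                      TXDESC_FLAG_INTPROC;  (interrupt on completion)
      */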
348 
 349 /* The status of the Tx DMA engine; it sits in free memory, and is pointed to
350  * by 0x101c / 0x1020. This is a DMA10 type
351  */
352 
353 /* TCB (Transmit Control Block: Host Side) */
354 struct tcb {
355         struct tcb *next;       /* Next entry in ring */
356         u32 flags;              /* Our flags for the packet */
357         u32 count;              /* Used to spot stuck/lost packets */
358         u32 stale;              /* Used to spot stuck/lost packets */
359         struct sk_buff *skb;    /* Network skb we are tied to */
360         u32 index;              /* Ring indexes */
361         u32 index_start;
362 };
363 
364 /* Structure representing our local reference(s) to the ring */
365 struct tx_ring {
366         /* TCB (Transmit Control Block) memory and lists */
367         struct tcb *tcb_ring;
368 
369         /* List of TCBs that are ready to be used */
370         struct tcb *tcb_qhead;
371         struct tcb *tcb_qtail;
372 
373         /* list of TCBs that are currently being sent.  NOTE that access to all
 374          * three of these (including used) is controlled via the
 375          * TCBSendQLock.  This lock should be secured prior to incrementing /
376          * decrementing used, or any queue manipulation on send_head /
377          * tail
378          */
379         struct tcb *send_head;
380         struct tcb *send_tail;
381         int used;
382 
383         /* The actual descriptor ring */
384         struct tx_desc *tx_desc_ring;
385         dma_addr_t tx_desc_ring_pa;
386 
387         /* send_idx indicates where we last wrote to in the descriptor ring. */
388         u32 send_idx;
389 
390         /* The location of the write-back status block */
391         u32 *tx_status;
392         dma_addr_t tx_status_pa;
393 
394         /* Packets since the last IRQ: used for interrupt coalescing */
395         int since_irq;
396 };
397 
 398 /* Do not change these values: if changed, then also change them in the
 399  * respective TxDMA and RxDMA engines
400  */
401 #define NUM_DESC_PER_RING_TX         512    /* TX Do not change these values */
402 #define NUM_TCB                      64
403 
404 /* These values are all superseded by registry entries to facilitate tuning.
405  * Once the desired performance has been achieved, the optimal registry values
406  * should be re-populated to these #defines:
407  */
408 #define TX_ERROR_PERIOD             1000
409 
410 #define LO_MARK_PERCENT_FOR_PSR     15
411 #define LO_MARK_PERCENT_FOR_RX      15
412 
413 /* RFD (Receive Frame Descriptor) */
414 struct rfd {
415         struct list_head list_node;
416         struct sk_buff *skb;
417         u32 len;        /* total size of receive frame */
418         u16 bufferindex;
419         u8 ringindex;
420 };
421 
422 /* Flow Control */
423 #define FLOW_BOTH       0
424 #define FLOW_TXONLY     1
425 #define FLOW_RXONLY     2
426 #define FLOW_NONE       3
427 
428 /* Struct to define some device statistics */
429 struct ce_stats {
430         /* MIB II variables
431          *
 432          * NOTE: atomic_t types are only guaranteed to store 24 bits; if we
433          * MUST have 32, then we'll need another way to perform atomic
434          * operations
435          */
436         u32             unicast_pkts_rcvd;
437         atomic_t        unicast_pkts_xmtd;
438         u32             multicast_pkts_rcvd;
439         atomic_t        multicast_pkts_xmtd;
440         u32             broadcast_pkts_rcvd;
441         atomic_t        broadcast_pkts_xmtd;
442         u32             rcvd_pkts_dropped;
443 
444         /* Tx Statistics. */
445         u32             tx_underflows;
446 
447         u32             tx_collisions;
448         u32             tx_excessive_collisions;
449         u32             tx_first_collisions;
450         u32             tx_late_collisions;
451         u32             tx_max_pkt_errs;
452         u32             tx_deferred;
453 
454         /* Rx Statistics. */
455         u32             rx_overflows;
456 
457         u32             rx_length_errs;
458         u32             rx_align_errs;
459         u32             rx_crc_errs;
460         u32             rx_code_violations;
461         u32             rx_other_errs;
462 
463         u32             synchronous_iterations;
464         u32             interrupt_status;
465 };
466 
467 /* The private adapter structure */
468 struct et131x_adapter {
469         struct net_device *netdev;
470         struct pci_dev *pdev;
471         struct mii_bus *mii_bus;
472         struct phy_device *phydev;
473         struct work_struct task;
474 
475         /* Flags that indicate current state of the adapter */
476         u32 flags;
477 
478         /* local link state, to determine if a state change has occurred */
479         int link;
480 
481         /* Configuration  */
482         u8 rom_addr[ETH_ALEN];
483         u8 addr[ETH_ALEN];
484         bool has_eeprom;
485         u8 eeprom_data[2];
486 
487         /* Spinlocks */
488         spinlock_t tcb_send_qlock;
489         spinlock_t tcb_ready_qlock;
490         spinlock_t send_hw_lock;
491 
492         spinlock_t rcv_lock;
493         spinlock_t fbr_lock;
494 
495         /* Packet Filter and look ahead size */
496         u32 packet_filter;
497 
498         /* multicast list */
499         u32 multicast_addr_count;
500         u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
501 
502         /* Pointer to the device's PCI register space */
503         struct address_map __iomem *regs;
504 
505         /* Registry parameters */
506         u8 wanted_flow;         /* Flow we want for 802.3x flow control */
507         u32 registry_jumbo_packet;      /* Max supported ethernet packet size */
508 
509         /* Derived from the registry: */
510         u8 flowcontrol;         /* flow control validated by the far-end */
511 
512         /* Minimize init-time */
513         struct timer_list error_timer;
514 
 515         /* Used to put the phy into coma mode when booting up with no cable
516          * plugged in after 5 seconds
517          */
518         u8 boot_coma;
519 
520         /* Next two used to save power information at power down. This
521          * information will be used during power up to set up parts of Power
522          * Management in JAGCore
523          */
524         u16 pdown_speed;
525         u8 pdown_duplex;
526 
527         /* Tx Memory Variables */
528         struct tx_ring tx_ring;
529 
530         /* Rx Memory Variables */
531         struct rx_ring rx_ring;
532 
533         /* Stats */
534         struct ce_stats stats;
535 
536         struct net_device_stats net_stats;
537 };
538 
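     /* eeprom_wait_ready - wait for the LBCIF/EEPROM interface to become idle
      * @pdev: pointer to our PCI device
      * @status: if non-NULL, receives the full LBCIF status dword
      *
      * Polls the LBCIF status (read as part of the DWORD1 group) up to
      * MAX_NUM_REGISTER_POLLS times.  Returns the low status byte once the
      * I2C engine is idle and the PHY queue is available, -ETIMEDOUT if that
      * never happens, or -EIO if the config space read itself fails.
      */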
539 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
540 {
541         u32 reg;
542         int i;
543 
544         /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
 545          *    bits 7, 1:0 all equal to 1, at least once after reset.
546          *    Subsequent operations need only to check that bits 1:0 are equal
547          *    to 1 prior to starting a single byte read/write
548          */
549 
550         for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
551                 /* Read registers grouped in DWORD1 */
552                 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
553                         return -EIO;
554 
555                 /* I2C idle and Phy Queue Avail both true */
556                 if ((reg & 0x3000) == 0x3000) {
557                         if (status)
558                                 *status = reg;
559                         return reg & 0xFF;
560                 }
561         }
562         return -ETIMEDOUT;
563 }
564 
565 /* eeprom_write - Write a byte to the ET1310's EEPROM
566  * @adapter: pointer to our private adapter structure
567  * @addr: the address to write
568  * @data: the value to write
569  *
 570  * Returns 0 for a successful write, a negative errno otherwise.
571  */
572 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
573 {
574         struct pci_dev *pdev = adapter->pdev;
575         int index = 0;
576         int retries;
577         int err = 0;
578         int i2c_wack = 0;
579         int writeok = 0;
580         u32 status;
581         u32 val = 0;
582 
583         /* For an EEPROM, an I2C single byte write is defined as a START
584          * condition followed by the device address, EEPROM address, one byte
585          * of data and a STOP condition.  The STOP condition will trigger the
586          * EEPROM's internally timed write cycle to the nonvolatile memory.
587          * All inputs are disabled during this write cycle and the EEPROM will
588          * not respond to any access until the internal write is complete.
589          */
590 
591         err = eeprom_wait_ready(pdev, NULL);
592         if (err < 0)
593                 return err;
594 
595          /* 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
596           *    and bits 1:0 both =0.  Bit 5 should be set according to the
597           *    type of EEPROM being accessed (1=two byte addressing, 0=one
598           *    byte addressing).
599           */
600         if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
601                         LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
602                 return -EIO;
603 
604         i2c_wack = 1;
605 
606         /* Prepare EEPROM address for Step 3 */
607 
608         for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
609                 /* Write the address to the LBCIF Address Register */
610                 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
611                         break;
612                 /* Write the data to the LBCIF Data Register (the I2C write
613                  * will begin).
614                  */
615                 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
616                         break;
617                 /* Monitor bit 1:0 of the LBCIF Status Register.  When bits
618                  * 1:0 are both equal to 1, the I2C write has completed and the
619                  * internal write cycle of the EEPROM is about to start.
 620                  * (bits 1:0 = 01 is a legal state while waiting for both to
 621                  * equal 1, but bits 1:0 = 10 is invalid and implies that
622                  * something is broken).
623                  */
624                 err = eeprom_wait_ready(pdev, &status);
625                 if (err < 0)
626                         return 0;
627 
 628                 /* Check bit 3 of the LBCIF Status Register.  If equal to 1,
 629                  * an error has occurred.  Don't break here if we are revision
 630                  * 1; this is so we do a blind write for the load bug.
631                  */
632                 if ((status & LBCIF_STATUS_GENERAL_ERROR)
633                         && adapter->pdev->revision == 0)
634                         break;
635 
636                 /* Check bit 2 of the LBCIF Status Register.  If equal to 1 an
637                  * ACK error has occurred on the address phase of the write.
638                  * This could be due to an actual hardware failure or the
639                  * EEPROM may still be in its internal write cycle from a
640                  * previous write. This write operation was ignored and must be
 641                  * repeated later.
642                  */
643                 if (status & LBCIF_STATUS_ACK_ERROR) {
644                         /* This could be due to an actual hardware failure
645                          * or the EEPROM may still be in its internal write
646                          * cycle from a previous write. This write operation
647                          * was ignored and must be repeated later.
648                          */
649                         udelay(10);
650                         continue;
651                 }
652 
653                 writeok = 1;
654                 break;
655         }
656 
657         /* Set bit 6 of the LBCIF Control Register = 0.
658          */
659         udelay(10);
660 
661         while (i2c_wack) {
662                 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
663                         LBCIF_CONTROL_LBCIF_ENABLE))
664                         writeok = 0;
665 
666                 /* Do read until internal ACK_ERROR goes away meaning write
667                  * completed
668                  */
669                 do {
670                         pci_write_config_dword(pdev,
671                                                LBCIF_ADDRESS_REGISTER,
672                                                addr);
673                         do {
674                                 pci_read_config_dword(pdev,
675                                         LBCIF_DATA_REGISTER, &val);
676                         } while ((val & 0x00010000) == 0);
677                 } while (val & 0x00040000);
678 
679                 if ((val & 0xFF00) != 0xC000 || index == 10000)
680                         break;
681                 index++;
682         }
683         return writeok ? 0 : -EIO;
684 }
685 
686 /* eeprom_read - Read a byte from the ET1310's EEPROM
687  * @adapter: pointer to our private adapter structure
688  * @addr: the address from which to read
689  * @pdata: a pointer to a byte in which to store the value of the read
 692  *
 693  * Returns 0 for a successful read, a negative errno otherwise
694  */
695 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
696 {
697         struct pci_dev *pdev = adapter->pdev;
698         int err;
699         u32 status;
700 
701         /* A single byte read is similar to the single byte write, with the
702          * exception of the data flow:
703          */
704 
705         err = eeprom_wait_ready(pdev, NULL);
706         if (err < 0)
707                 return err;
708         /* Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
709          * and bits 1:0 both =0.  Bit 5 should be set according to the type
710          * of EEPROM being accessed (1=two byte addressing, 0=one byte
711          * addressing).
712          */
713         if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
714                                   LBCIF_CONTROL_LBCIF_ENABLE))
715                 return -EIO;
716         /* Write the address to the LBCIF Address Register (I2C read will
717          * begin).
718          */
719         if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
720                 return -EIO;
721         /* Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
722          * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
723          * has occurred).
724          */
725         err = eeprom_wait_ready(pdev, &status);
726         if (err < 0)
727                 return err;
728         /* Regardless of error status, read data byte from LBCIF Data
729          * Register.
730          */
731         *pdata = err;
732         /* Check bit 2 of the LBCIF Status Register.  If = 1,
733          * then an error has occurred.
734          */
735         return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
736 }
737 
738 static int et131x_init_eeprom(struct et131x_adapter *adapter)
739 {
740         struct pci_dev *pdev = adapter->pdev;
741         u8 eestatus;
742 
743         /* We first need to check the EEPROM Status code located at offset
744          * 0xB2 of config space
745          */
746         pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
747 
748         /* THIS IS A WORKAROUND:
 749          * I need to call this function twice to get my card in a
 750          * LG M1 Express Dual running. I also tried a msleep before this
 751          * read, because I thought there could be some timing conditions,
 752          * but it didn't work. Calling the whole function twice also works.
753          */
754         if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
755                 dev_err(&pdev->dev,
756                        "Could not read PCI config space for EEPROM Status\n");
757                 return -EIO;
758         }
759 
760         /* Determine if the error(s) we care about are present. If they are
761          * present we need to fail.
762          */
763         if (eestatus & 0x4C) {
764                 int write_failed = 0;
765 
766                 if (pdev->revision == 0x01) {
767                         int     i;
768                         static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
769 
770                         /* Re-write the first 4 bytes if we have an eeprom
 771                          * present and the revision id is 1; this fixes the
772                          * corruption seen with 1310 B Silicon
773                          */
774                         for (i = 0; i < 3; i++)
775                                 if (eeprom_write(adapter, i, eedata[i]) < 0)
776                                         write_failed = 1;
777                 }
778                 if (pdev->revision  != 0x01 || write_failed) {
779                         dev_err(&pdev->dev,
780                             "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
781 
782                         /* This error could mean that there was an error
783                          * reading the eeprom or that the eeprom doesn't exist.
784                          * We will treat each case the same and not try to
785                          * gather additional information that normally would
786                          * come from the eeprom, like MAC Address
787                          */
788                         adapter->has_eeprom = 0;
789                         return -EIO;
790                 }
791         }
792         adapter->has_eeprom = 1;
793 
794         /* Read the EEPROM for information regarding LED behavior. Refer to
795          * ET1310_phy.c, et131x_xcvr_init(), for its use.
796          */
797         eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
798         eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
799 
800         if (adapter->eeprom_data[0] != 0xcd)
801                 /* Disable all optional features */
802                 adapter->eeprom_data[1] = 0x00;
803 
804         return 0;
805 }
806 
 807 /* et131x_rx_dma_enable - re-start Rx_DMA on the ET1310.
808  * @adapter: pointer to our adapter structure
809  */
810 static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
811 {
812         /* Setup the receive dma configuration register for normal operation */
813         u32 csr =  ET_RXDMA_CSR_FBR1_ENABLE;
814         struct rx_ring *rx_ring = &adapter->rx_ring;
815 
816         if (rx_ring->fbr[1]->buffsize == 4096)
817                 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
818         else if (rx_ring->fbr[1]->buffsize == 8192)
819                 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
820         else if (rx_ring->fbr[1]->buffsize == 16384)
821                 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
822 
823         csr |= ET_RXDMA_CSR_FBR0_ENABLE;
824         if (rx_ring->fbr[0]->buffsize == 256)
825                 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
826         else if (rx_ring->fbr[0]->buffsize == 512)
827                 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
828         else if (rx_ring->fbr[0]->buffsize == 1024)
829                 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
830         writel(csr, &adapter->regs->rxdma.csr);
831 
832         csr = readl(&adapter->regs->rxdma.csr);
833         if (csr & ET_RXDMA_CSR_HALT_STATUS) {
834                 udelay(5);
835                 csr = readl(&adapter->regs->rxdma.csr);
836                 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
837                         dev_err(&adapter->pdev->dev,
838                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
839                                 csr);
840                 }
841         }
842 }
843 
 844 /* et131x_rx_dma_disable - Stop Rx_DMA on the ET1310
845  * @adapter: pointer to our adapter structure
846  */
847 static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
848 {
849         u32 csr;
850         /* Setup the receive dma configuration register */
851         writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
852                &adapter->regs->rxdma.csr);
853         csr = readl(&adapter->regs->rxdma.csr);
854         if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
855                 udelay(5);
856                 csr = readl(&adapter->regs->rxdma.csr);
857                 if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
858                         dev_err(&adapter->pdev->dev,
859                               "RX Dma failed to enter halt state. CSR 0x%08x\n",
860                               csr);
861         }
862 }
863 
 864 /* et131x_tx_dma_enable - re-start Tx_DMA on the ET1310.
865  * @adapter: pointer to our adapter structure
866  *
867  * Mainly used after a return to the D0 (full-power) state from a lower state.
868  */
869 static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
870 {
871         /* Setup the transmit dma configuration register for normal
872          * operation
873          */
874         writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
875                                         &adapter->regs->txdma.csr);
876 }
877 
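     /* Advance a 10-bit or 12-bit hardware ring index by n.  These indices
      * carry a wrap flag in a high bit (ET_DMA10_WRAP / ET_DMA12_WRAP); the
      * index portion is updated via INDEX10()/INDEX12() from et131x.h while
      * the caller's existing wrap bit is preserved.
      */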
878 static inline void add_10bit(u32 *v, int n)
879 {
880         *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
881 }
882 
883 static inline void add_12bit(u32 *v, int n)
884 {
885         *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
886 }
887 
888 /* et1310_config_mac_regs1 - Initialize the first part of MAC regs
889  * @adapter: pointer to our adapter structure
890  */
891 static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
892 {
893         struct mac_regs __iomem *macregs = &adapter->regs->mac;
894         u32 station1;
895         u32 station2;
896         u32 ipg;
897 
898         /* First we need to reset everything.  Write to MAC configuration
899          * register 1 to perform reset.
900          */
901         writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET  |
902                ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
903                ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
904                &macregs->cfg1);
905 
 906         /* Next let's configure the MAC Inter-packet gap register */
907         ipg = 0x38005860;               /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
908         ipg |= 0x50 << 8;               /* ifg enforce 0x50 */
909         writel(ipg, &macregs->ipg);
910 
 911         /* Next let's configure the MAC Half Duplex register */
912         /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
913         writel(0x00A1F037, &macregs->hfdp);
914 
 915         /* Next let's configure the MAC Interface Control register */
916         writel(0, &macregs->if_ctrl);
917 
918         /* Let's move on to setting up the mii management configuration */
919         writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);
920 
 921         /* Next let's configure the MAC Station Address register.  These
922          * values are read from the EEPROM during initialization and stored
923          * in the adapter structure.  We write what is stored in the adapter
924          * structure to the MAC Station Address registers high and low.  This
925          * station address is used for generating and checking pause control
926          * packets.
927          */
928         station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
929                    (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
930         station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
931                    (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
932                    (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
933                     adapter->addr[2];
934         writel(station1, &macregs->station_addr_1);
935         writel(station2, &macregs->station_addr_2);
936 
937         /* Max ethernet packet in bytes that will be passed by the mac without
938          * being truncated.  Allow the MAC to pass 4 more than our max packet
939          * size.  This is 4 for the Ethernet CRC.
940          *
941          * Packets larger than (registry_jumbo_packet) that do not contain a
942          * VLAN ID will be dropped by the Rx function.
943          */
944         writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
945 
946         /* clear out MAC config reset */
947         writel(0, &macregs->cfg1);
948 }
949 
950 /* et1310_config_mac_regs2 - Initialize the second part of MAC regs
951  * @adapter: pointer to our adapter structure
952  */
953 static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
954 {
955         int32_t delay = 0;
956         struct mac_regs __iomem *mac = &adapter->regs->mac;
957         struct phy_device *phydev = adapter->phydev;
958         u32 cfg1;
959         u32 cfg2;
960         u32 ifctrl;
961         u32 ctl;
962 
963         ctl = readl(&adapter->regs->txmac.ctl);
964         cfg1 = readl(&mac->cfg1);
965         cfg2 = readl(&mac->cfg2);
966         ifctrl = readl(&mac->if_ctrl);
967 
968         /* Set up the if mode bits */
969         cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
970         if (phydev->speed == SPEED_1000) {
971                 cfg2 |= ET_MAC_CFG2_IFMODE_1000;
972                 /* Phy mode bit */
973                 ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
974         } else {
975                 cfg2 |= ET_MAC_CFG2_IFMODE_100;
976                 ifctrl |= ET_MAC_IFCTRL_PHYMODE;
977         }
978 
979         /* We need to enable Rx/Tx */
980         cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
981                                                         ET_MAC_CFG1_TX_FLOW;
982         /* Initialize loop back to off */
983         cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
984         if (adapter->flowcontrol == FLOW_RXONLY ||
985                                 adapter->flowcontrol == FLOW_BOTH)
986                 cfg1 |= ET_MAC_CFG1_RX_FLOW;
987         writel(cfg1, &mac->cfg1);
988 
989         /* Now we need to initialize the MAC Configuration 2 register */
990         /* preamble 7, check length, huge frame off, pad crc, crc enable
991          * full duplex off
992          */
993         cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
994         cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
995         cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
996         cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
997         cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
998         cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
999 
1000         /* Turn on duplex if needed */
1001         if (phydev->duplex == DUPLEX_FULL)
1002                 cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
1003 
1004         ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
1005         if (phydev->duplex == DUPLEX_HALF)
1006                 ifctrl |= ET_MAC_IFCTRL_GHDMODE;
1007 
1008         writel(ifctrl, &mac->if_ctrl);
1009         writel(cfg2, &mac->cfg2);
1010 
1011         do {
1012                 udelay(10);
1013                 delay++;
1014                 cfg1 = readl(&mac->cfg1);
1015         } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
1016 
1017         if (delay == 100) {
1018                 dev_warn(&adapter->pdev->dev,
1019                     "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1020                         cfg1);
1021         }
1022 
1023         /* Enable txmac */
1024         ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
1025         writel(ctl, &adapter->regs->txmac.ctl);
1026 
1027         /* Ready to start the RXDMA/TXDMA engine */
1028         if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
1029                 et131x_rx_dma_enable(adapter);
1030                 et131x_tx_dma_enable(adapter);
1031         }
1032 }
1033 
1034 /* et1310_in_phy_coma - check if the device is in phy coma
1035  * @adapter: pointer to our adapter structure
1036  *
1037  * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
1038  */
1039 static int et1310_in_phy_coma(struct et131x_adapter *adapter)
1040 {
1041         u32 pmcsr = readl(&adapter->regs->global.pm_csr);
1042 
1043         return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1044 }
1045 
1046 static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1047 {
1048         struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1049         u32 hash1 = 0;
1050         u32 hash2 = 0;
1051         u32 hash3 = 0;
1052         u32 hash4 = 0;
1053         u32 pm_csr;
1054 
1055         /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
 1056          * the multi-cast LIST.  If it is NOT specified (and "ALL" is not
1057          * specified) then we should pass NO multi-cast addresses to the
1058          * driver.
1059          */
1060         if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1061                 int i;
1062 
1063                 /* Loop through our multicast array and set up the device */
1064                 for (i = 0; i < adapter->multicast_addr_count; i++) {
1065                         u32 result;
1066 
1067                         result = ether_crc(6, adapter->multicast_list[i]);
1068 
1069                         result = (result & 0x3F800000) >> 23;
1070 
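                              /* result now holds 7 bits of the CRC (a value of
                               * 0-127): an index into the 128-bit multicast
                               * hash spread across hash1-hash4 below.
                               */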
1071                         if (result < 32) {
1072                                 hash1 |= (1 << result);
1073                         } else if ((31 < result) && (result < 64)) {
1074                                 result -= 32;
1075                                 hash2 |= (1 << result);
1076                         } else if ((63 < result) && (result < 96)) {
1077                                 result -= 64;
1078                                 hash3 |= (1 << result);
1079                         } else {
1080                                 result -= 96;
1081                                 hash4 |= (1 << result);
1082                         }
1083                 }
1084         }
1085 
1086         /* Write out the new hash to the device */
1087         pm_csr = readl(&adapter->regs->global.pm_csr);
1088         if (!et1310_in_phy_coma(adapter)) {
1089                 writel(hash1, &rxmac->multi_hash1);
1090                 writel(hash2, &rxmac->multi_hash2);
1091                 writel(hash3, &rxmac->multi_hash3);
1092                 writel(hash4, &rxmac->multi_hash4);
1093         }
1094 }
1095 
1096 static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1097 {
1098         struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1099         u32 uni_pf1;
1100         u32 uni_pf2;
1101         u32 uni_pf3;
1102         u32 pm_csr;
1103 
1104         /* Set up unicast packet filter reg 3 to be the first two octets of
 1105          * the MAC address for both addresses
1106          *
1107          * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
1108          * MAC address for second address
1109          *
 1110          * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
1111          * MAC address for first address
1112          */
1113         uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
1114                   (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
1115                   (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
1116                    adapter->addr[1];
1117 
1118         uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
1119                   (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
1120                   (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
1121                    adapter->addr[5];
1122 
1123         uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
1124                   (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
1125                   (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
1126                    adapter->addr[5];
1127 
1128         pm_csr = readl(&adapter->regs->global.pm_csr);
1129         if (!et1310_in_phy_coma(adapter)) {
1130                 writel(uni_pf1, &rxmac->uni_pf_addr1);
1131                 writel(uni_pf2, &rxmac->uni_pf_addr2);
1132                 writel(uni_pf3, &rxmac->uni_pf_addr3);
1133         }
1134 }
1135 
1136 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1137 {
1138         struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1139         struct phy_device *phydev = adapter->phydev;
1140         u32 sa_lo;
1141         u32 sa_hi = 0;
1142         u32 pf_ctrl = 0;
1143 
1144         /* Disable the MAC while it is being configured (also disable WOL) */
1145         writel(0x8, &rxmac->ctrl);
1146 
1147         /* Initialize WOL to disabled. */
1148         writel(0, &rxmac->crc0);
1149         writel(0, &rxmac->crc12);
1150         writel(0, &rxmac->crc34);
1151 
 1152         /* We need to set the WOL mask0 - mask4 next.  We initialize them to
 1153          * their default values of 0x00000000 because there are no WOL masks
1154          * as of this time.
1155          */
1156         writel(0, &rxmac->mask0_word0);
1157         writel(0, &rxmac->mask0_word1);
1158         writel(0, &rxmac->mask0_word2);
1159         writel(0, &rxmac->mask0_word3);
1160 
1161         writel(0, &rxmac->mask1_word0);
1162         writel(0, &rxmac->mask1_word1);
1163         writel(0, &rxmac->mask1_word2);
1164         writel(0, &rxmac->mask1_word3);
1165 
1166         writel(0, &rxmac->mask2_word0);
1167         writel(0, &rxmac->mask2_word1);
1168         writel(0, &rxmac->mask2_word2);
1169         writel(0, &rxmac->mask2_word3);
1170 
1171         writel(0, &rxmac->mask3_word0);
1172         writel(0, &rxmac->mask3_word1);
1173         writel(0, &rxmac->mask3_word2);
1174         writel(0, &rxmac->mask3_word3);
1175 
1176         writel(0, &rxmac->mask4_word0);
1177         writel(0, &rxmac->mask4_word1);
1178         writel(0, &rxmac->mask4_word2);
1179         writel(0, &rxmac->mask4_word3);
1180 
 1181         /* Let's set up the WOL Source Address */
1182         sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
1183                 (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
1184                 (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
1185                  adapter->addr[5];
1186         writel(sa_lo, &rxmac->sa_lo);
1187 
1188         sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
1189                        adapter->addr[1];
1190         writel(sa_hi, &rxmac->sa_hi);
1191 
1192         /* Disable all Packet Filtering */
1193         writel(0, &rxmac->pf_ctrl);
1194 
1195         /* Let's initialize the Unicast Packet filtering address */
1196         if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1197                 et1310_setup_device_for_unicast(adapter);
1198                 pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
1199         } else {
1200                 writel(0, &rxmac->uni_pf_addr1);
1201                 writel(0, &rxmac->uni_pf_addr2);
1202                 writel(0, &rxmac->uni_pf_addr3);
1203         }
1204 
1205         /* Let's initialize the Multicast hash */
1206         if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1207                 pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
1208                 et1310_setup_device_for_multicast(adapter);
1209         }
1210 
1211         /* Runt packet filtering.  Didn't work in version A silicon. */
1212         pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
1213         pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;
1214 
1215         if (adapter->registry_jumbo_packet > 8192)
1216                 /* In order to transmit jumbo packets greater than 8k, the
1217                  * FIFO between RxMAC and RxDMA needs to be reduced in size
1218                  * to (16k - Jumbo packet size).  In order to implement this,
1219                  * we must use "cut through" mode in the RxMAC, which chops
1220                  * packets down into segments which are (max_size * 16).  In
1221                  * this case we selected 256 bytes, since this is the size of
1222                  * the PCI-Express TLP's that the 1310 uses.
1223                  *
1224                  * seg_en on, fc_en off, size 0x10
1225                  */
1226                 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1227         else
1228                 writel(0, &rxmac->mcif_ctrl_max_seg);
1229 
1230         /* Initialize the MCIF water marks */
1231         writel(0, &rxmac->mcif_water_mark);
1232 
1233         /*  Initialize the MIF control */
1234         writel(0, &rxmac->mif_ctrl);
1235 
1236         /* Initialize the Space Available Register */
1237         writel(0, &rxmac->space_avail);
1238 
 1239         /* Initialize the mif_ctrl register
 1240          * bit 3:  Receive code error. One or more nibbles were signaled as
 1241          *         errors during the reception of the packet.  Clear this
1242          *         bit in Gigabit, set it in 100Mbit.  This was derived
1243          *         experimentally at UNH.
1244          * bit 4:  Receive CRC error. The packet's CRC did not match the
1245          *         internally generated CRC.
1246          * bit 5:  Receive length check error. Indicates that frame length
1247          *         field value in the packet does not match the actual data
1248          *         byte length and is not a type field.
1249          * bit 16: Receive frame truncated.
1250          * bit 17: Drop packet enable
1251          */
1252         if (phydev && phydev->speed == SPEED_100)
1253                 writel(0x30038, &rxmac->mif_ctrl);
1254         else
1255                 writel(0x30030, &rxmac->mif_ctrl);
1256 
1257         /* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
1258          * filter is always enabled since it is where the runt packets are
1259          * supposed to be dropped.  For version A silicon, runt packet
1260          * dropping doesn't work, so it is disabled in the pf_ctrl register,
1261          * but we still leave the packet filter on.
1262          */
1263         writel(pf_ctrl, &rxmac->pf_ctrl);
1264         writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
1265 }
1266 
1267 static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1268 {
1269         struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1270 
1271         /* We need to update the Control Frame Parameters
1272          * cfpt - control frame pause timer set to 64 (0x40)
1273          * cfep - control frame extended pause timer set to 0x0
1274          */
1275         if (adapter->flowcontrol == FLOW_NONE)
1276                 writel(0, &txmac->cf_param);
1277         else
1278                 writel(0x40, &txmac->cf_param);
1279 }
1280 
1281 static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1282 {
1283         struct macstat_regs __iomem *macstat =
1284                 &adapter->regs->macstat;
1285 
1286         /* Next we need to initialize all the macstat registers to zero on
1287          * the device.
1288          */
1289         writel(0, &macstat->txrx_0_64_byte_frames);
1290         writel(0, &macstat->txrx_65_127_byte_frames);
1291         writel(0, &macstat->txrx_128_255_byte_frames);
1292         writel(0, &macstat->txrx_256_511_byte_frames);
1293         writel(0, &macstat->txrx_512_1023_byte_frames);
1294         writel(0, &macstat->txrx_1024_1518_byte_frames);
1295         writel(0, &macstat->txrx_1519_1522_gvln_frames);
1296 
1297         writel(0, &macstat->rx_bytes);
1298         writel(0, &macstat->rx_packets);
1299         writel(0, &macstat->rx_fcs_errs);
1300         writel(0, &macstat->rx_multicast_packets);
1301         writel(0, &macstat->rx_broadcast_packets);
1302         writel(0, &macstat->rx_control_frames);
1303         writel(0, &macstat->rx_pause_frames);
1304         writel(0, &macstat->rx_unknown_opcodes);
1305         writel(0, &macstat->rx_align_errs);
1306         writel(0, &macstat->rx_frame_len_errs);
1307         writel(0, &macstat->rx_code_errs);
1308         writel(0, &macstat->rx_carrier_sense_errs);
1309         writel(0, &macstat->rx_undersize_packets);
1310         writel(0, &macstat->rx_oversize_packets);
1311         writel(0, &macstat->rx_fragment_packets);
1312         writel(0, &macstat->rx_jabbers);
1313         writel(0, &macstat->rx_drops);
1314 
1315         writel(0, &macstat->tx_bytes);
1316         writel(0, &macstat->tx_packets);
1317         writel(0, &macstat->tx_multicast_packets);
1318         writel(0, &macstat->tx_broadcast_packets);
1319         writel(0, &macstat->tx_pause_frames);
1320         writel(0, &macstat->tx_deferred);
1321         writel(0, &macstat->tx_excessive_deferred);
1322         writel(0, &macstat->tx_single_collisions);
1323         writel(0, &macstat->tx_multiple_collisions);
1324         writel(0, &macstat->tx_late_collisions);
1325         writel(0, &macstat->tx_excessive_collisions);
1326         writel(0, &macstat->tx_total_collisions);
1327         writel(0, &macstat->tx_pause_honored_frames);
1328         writel(0, &macstat->tx_drops);
1329         writel(0, &macstat->tx_jabbers);
1330         writel(0, &macstat->tx_fcs_errs);
1331         writel(0, &macstat->tx_control_frames);
1332         writel(0, &macstat->tx_oversize_frames);
1333         writel(0, &macstat->tx_undersize_frames);
1334         writel(0, &macstat->tx_fragments);
1335         writel(0, &macstat->carry_reg1);
1336         writel(0, &macstat->carry_reg2);
1337 
1338         /* Unmask any counters that we want to track the overflow of.
1339          * Initially this will be all counters.  It may become clear later
1340          * that we do not need to track all counters.
1341          */
1342         writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1343         writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1344 }
1345 
1346 /* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
1347  * @adapter: pointer to our private adapter structure
1348  * @addr: the address of the transceiver
1349  * @reg: the register to read
1350  * @value: pointer to a 16-bit value in which the value will be stored
1351  */
1352 static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1353               u8 reg, u16 *value)
1354 {
1355         struct mac_regs __iomem *mac = &adapter->regs->mac;
1356         int status = 0;
1357         u32 delay = 0;
1358         u32 mii_addr;
1359         u32 mii_cmd;
1360         u32 mii_indicator;
1361 
1362         /* Save a local copy of the registers we are dealing with so we can
1363          * set them back
1364          */
1365         mii_addr = readl(&mac->mii_mgmt_addr);
1366         mii_cmd = readl(&mac->mii_mgmt_cmd);
1367 
1368         /* Stop the current operation */
1369         writel(0, &mac->mii_mgmt_cmd);
1370 
1371         /* Set up the register we need to read from on the correct PHY */
1372         writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1373 
1374         writel(0x1, &mac->mii_mgmt_cmd);
1375 
1376         do {
1377                 udelay(50);
1378                 delay++;
1379                 mii_indicator = readl(&mac->mii_mgmt_indicator);
1380         } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);
1381 
1382         /* If we hit the max delay, we could not read the register */
1383         if (delay == 50) {
1384                 dev_warn(&adapter->pdev->dev,
1385                             "reg 0x%08x could not be read\n", reg);
1386                 dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1387                             mii_indicator);
1388 
1389                 status = -EIO;
1390                 goto out;
1391         }
1392 
1393         /* If we hit here we were able to read the register and we need to
1394          * return the value to the caller
1395          */
1396         *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
1397 
1398 out:
1399         /* Stop the read operation */
1400         writel(0, &mac->mii_mgmt_cmd);
1401 
1402         /* set the registers we touched back to the state at which we entered
1403          * this function
1404          */
1405         writel(mii_addr, &mac->mii_mgmt_addr);
1406         writel(mii_cmd, &mac->mii_mgmt_cmd);
1407 
1408         return status;
1409 }
1410 
1411 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1412 {
1413         struct phy_device *phydev = adapter->phydev;
1414 
1415         if (!phydev)
1416                 return -EIO;
1417 
1418         return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1419 }
1420 
1421 /* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
1422  * @adapter: pointer to our private adapter structure
1423  * @reg: the register to write to
1424  * @value: 16-bit value to write
1425  */
1426 static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1427 {
1428         struct mac_regs __iomem *mac = &adapter->regs->mac;
1429         struct phy_device *phydev = adapter->phydev;
1430         int status = 0;
1431         u8 addr;
1432         u32 delay = 0;
1433         u32 mii_addr;
1434         u32 mii_cmd;
1435         u32 mii_indicator;
1436 
1437         if (!phydev)
1438                 return -EIO;
1439 
1440         addr = phydev->addr;
1441 
1442         /* Save a local copy of the registers we are dealing with so we can
1443          * set them back
1444          */
1445         mii_addr = readl(&mac->mii_mgmt_addr);
1446         mii_cmd = readl(&mac->mii_mgmt_cmd);
1447 
1448         /* Stop the current operation */
1449         writel(0, &mac->mii_mgmt_cmd);
1450 
1451         /* Set up the register we need to write to on the correct PHY */
1452         writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1453 
1454         /* Write the value into the MII management control register */
1455         writel(value, &mac->mii_mgmt_ctrl);
1456 
1457         do {
1458                 udelay(50);
1459                 delay++;
1460                 mii_indicator = readl(&mac->mii_mgmt_indicator);
1461         } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
1462 
1463         /* If we hit the max delay, we could not write the register */
1464         if (delay == 100) {
1465                 u16 tmp;
1466 
1467                 dev_warn(&adapter->pdev->dev,
1468                     "reg 0x%08x could not be written\n", reg);
1469                 dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1470                             mii_indicator);
1471                 dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
1472                             readl(&mac->mii_mgmt_cmd));
1473 
1474                 et131x_mii_read(adapter, reg, &tmp);
1475 
1476                 status = -EIO;
1477         }
1478         /* Stop the write operation */
1479         writel(0, &mac->mii_mgmt_cmd);
1480 
1481         /* set the registers we touched back to the state at which we entered
1482          * this function
1483          */
1484         writel(mii_addr, &mac->mii_mgmt_addr);
1485         writel(mii_cmd, &mac->mii_mgmt_cmd);
1486 
1487         return status;
1488 }
1489 
1490 static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
1491                                     u16 regnum,
1492                                     u16 bitnum,
1493                                     u8 *value)
1494 {
1495         u16 reg;
1496         u16 mask = 1 << bitnum;
1497 
1498         /* Read the requested register */
1499         et131x_mii_read(adapter, regnum, &reg);
1500 
1501         *value = (reg & mask) >> bitnum;
1502 }
1503 
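     /* et1310_config_flow_control - resolve flow control with the link partner
      *
      * Half duplex never uses flow control.  Otherwise the link partner's
      * Pause (register 5, bit 10) and Asym_Pause (bit 11) advertisement is
      * combined with what we asked for (wanted_flow), roughly following the
      * 802.3 pause resolution rules:
      *
      *   Pause  Asym_Pause  result
      *     1        1       whatever we wanted
      *     1        0       FLOW_BOTH if we wanted both, otherwise FLOW_NONE
      *     0        0       FLOW_NONE
      *     0        1       FLOW_RXONLY if we wanted both, otherwise FLOW_NONE
      */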
1504 static void et1310_config_flow_control(struct et131x_adapter *adapter)
1505 {
1506         struct phy_device *phydev = adapter->phydev;
1507 
1508         if (phydev->duplex == DUPLEX_HALF) {
1509                 adapter->flowcontrol = FLOW_NONE;
1510         } else {
1511                 u8 remote_pause, remote_async_pause;
1512 
1513                 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
1514                 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
1515 
1516                 if (remote_pause && remote_async_pause) {
1517                         adapter->flowcontrol = adapter->wanted_flow;
1518                 } else if (remote_pause && !remote_async_pause) {
1519                         if (adapter->wanted_flow == FLOW_BOTH)
1520                                 adapter->flowcontrol = FLOW_BOTH;
1521                         else
1522                                 adapter->flowcontrol = FLOW_NONE;
1523                 } else if (!remote_pause && !remote_async_pause) {
1524                         adapter->flowcontrol = FLOW_NONE;
1525                 } else {
1526                         if (adapter->wanted_flow == FLOW_BOTH)
1527                                 adapter->flowcontrol = FLOW_RXONLY;
1528                         else
1529                                 adapter->flowcontrol = FLOW_NONE;
1530                 }
1531         }
1532 }
1533 
1534 /* et1310_update_macstat_host_counters - Update local copy of the statistics */
1535 static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1536 {
1537         struct ce_stats *stats = &adapter->stats;
1538         struct macstat_regs __iomem *macstat =
1539                 &adapter->regs->macstat;
1540 
1541         stats->tx_collisions           += readl(&macstat->tx_total_collisions);
1542         stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
1543         stats->tx_deferred             += readl(&macstat->tx_deferred);
1544         stats->tx_excessive_collisions +=
1545                                 readl(&macstat->tx_multiple_collisions);
1546         stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
1547         stats->tx_underflows           += readl(&macstat->tx_undersize_frames);
1548         stats->tx_max_pkt_errs         += readl(&macstat->tx_oversize_frames);
1549 
1550         stats->rx_align_errs        += readl(&macstat->rx_align_errs);
1551         stats->rx_crc_errs          += readl(&macstat->rx_code_errs);
1552         stats->rcvd_pkts_dropped    += readl(&macstat->rx_drops);
1553         stats->rx_overflows         += readl(&macstat->rx_oversize_packets);
1554         stats->rx_code_violations   += readl(&macstat->rx_fcs_errs);
1555         stats->rx_length_errs       += readl(&macstat->rx_frame_len_errs);
1556         stats->rx_other_errs        += readl(&macstat->rx_fragment_packets);
1557 }
1558 
1559 /* et1310_handle_macstat_interrupt
1560  *
1561  * One of the MACSTAT counters has wrapped.  Update the local copy of
1562  * the statistics held in the adapter structure, checking the "wrap"
1563  * bit for each counter.
1564  */
1565 static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1566 {
1567         u32 carry_reg1;
1568         u32 carry_reg2;
1569 
1570         /* Read the interrupt bits from the register(s).  These are Clear On
1571          * Write.
1572          */
1573         carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1574         carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1575 
1576         writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1577         writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1578 
1579         /* We need to update the host copy of all the MAC_STAT counters.
1580          * For each counter, check its overflow bit.  If the overflow bit is
1581          * set, then increment the host version of the count by one complete
1582          * revolution of the counter.  This routine is called when the counter
1583          * block indicates that one of the counters has wrapped.
1584          */
1585         if (carry_reg1 & (1 << 14))
1586                 adapter->stats.rx_code_violations       += COUNTER_WRAP_16_BIT;
1587         if (carry_reg1 & (1 << 8))
1588                 adapter->stats.rx_align_errs    += COUNTER_WRAP_12_BIT;
1589         if (carry_reg1 & (1 << 7))
1590                 adapter->stats.rx_length_errs   += COUNTER_WRAP_16_BIT;
1591         if (carry_reg1 & (1 << 2))
1592                 adapter->stats.rx_other_errs    += COUNTER_WRAP_16_BIT;
1593         if (carry_reg1 & (1 << 6))
1594                 adapter->stats.rx_crc_errs      += COUNTER_WRAP_16_BIT;
1595         if (carry_reg1 & (1 << 3))
1596                 adapter->stats.rx_overflows     += COUNTER_WRAP_16_BIT;
1597         if (carry_reg1 & (1 << 0))
1598                 adapter->stats.rcvd_pkts_dropped        += COUNTER_WRAP_16_BIT;
1599         if (carry_reg2 & (1 << 16))
1600                 adapter->stats.tx_max_pkt_errs  += COUNTER_WRAP_12_BIT;
1601         if (carry_reg2 & (1 << 15))
1602                 adapter->stats.tx_underflows    += COUNTER_WRAP_12_BIT;
1603         if (carry_reg2 & (1 << 6))
1604                 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1605         if (carry_reg2 & (1 << 8))
1606                 adapter->stats.tx_deferred      += COUNTER_WRAP_12_BIT;
1607         if (carry_reg2 & (1 << 5))
1608                 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1609         if (carry_reg2 & (1 << 4))
1610                 adapter->stats.tx_late_collisions       += COUNTER_WRAP_12_BIT;
1611         if (carry_reg2 & (1 << 2))
1612                 adapter->stats.tx_collisions    += COUNTER_WRAP_12_BIT;
1613 }
1614 
1615 static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1616 {
1617         struct net_device *netdev = bus->priv;
1618         struct et131x_adapter *adapter = netdev_priv(netdev);
1619         u16 value;
1620         int ret;
1621 
1622         ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1623 
1624         if (ret < 0)
1625                 return ret;
1626         else
1627                 return value;
1628 }
1629 
1630 static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1631                              int reg, u16 value)
1632 {
1633         struct net_device *netdev = bus->priv;
1634         struct et131x_adapter *adapter = netdev_priv(netdev);
1635 
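             /* Note that phy_addr is not used here; et131x_mii_write() always
              * targets adapter->phydev->addr.
              */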
1636         return et131x_mii_write(adapter, reg, value);
1637 }
1638 
1639 static int et131x_mdio_reset(struct mii_bus *bus)
1640 {
1641         struct net_device *netdev = bus->priv;
1642         struct et131x_adapter *adapter = netdev_priv(netdev);
1643 
1644         et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1645 
1646         return 0;
1647 }
1648 
1649 /*      et1310_phy_power_switch -       PHY power control
1650  *      @adapter: device to control
1651  *      @down: true for off/false for back on
1652  *
1653  *      one hundred, ten, one thousand megs
1654  *      How would you like to have your LAN accessed
1655  *      Can't you see that this code processed
1656  *      Phy power, phy power..
1657  */
1658 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
1659 {
1660         u16 data;
1661 
1662         et131x_mii_read(adapter, MII_BMCR, &data);
1663         data &= ~BMCR_PDOWN;
1664         if (down)
1665                 data |= BMCR_PDOWN;
1666         et131x_mii_write(adapter, MII_BMCR, data);
1667 }
1668 
1669 /* et131x_xcvr_init - Init the phy if we are setting it into force mode */
1670 static void et131x_xcvr_init(struct et131x_adapter *adapter)
1671 {
1672         u16 lcr2;
1673 
1674         /* Set the LED behavior such that LED 1 indicates speed (off =
1675          * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1676          * link and activity (on for link, blink off for activity).
1677          *
1678          * NOTE: Some customizations have been added here for specific
1679          * vendors; The LED behavior is now determined by vendor data in the
1680          * EEPROM. However, the above description is the default.
1681          */
1682         if ((adapter->eeprom_data[1] & 0x4) == 0) {
1683                 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1684 
1685                 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1686                 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1687 
1688                 if ((adapter->eeprom_data[1] & 0x8) == 0)
1689                         lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1690                 else
1691                         lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1692 
1693                 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1694         }
1695 }
1696 
1697 /* et131x_configure_global_regs - configure JAGCore global regs
1698  *
1699  * Used to configure the global registers on the JAGCore
1700  */
1701 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1702 {
1703         struct global_regs __iomem *regs = &adapter->regs->global;
1704 
1705         writel(0, &regs->rxq_start_addr);
1706         writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1707 
1708         if (adapter->registry_jumbo_packet < 2048) {
1709                 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1710                  * block of RAM that the driver can split between Tx
1711                  * and Rx as it desires.  Our default is to split it
1712                  * 50/50:
1713                  */
1714                 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1715                 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1716         } else if (adapter->registry_jumbo_packet < 8192) {
1717                 /* For jumbo packets > 2k but < 8k, split 50-50. */
1718                 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1719                 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1720         } else {
1721                 /* 9216 is the only packet size greater than 8k that
1722                  * is available. The Tx buffer has to be big enough
1723                  * for one whole packet on the Tx side. We'll make
1724                  * the Tx 9408, and give the rest to Rx
1725                  */
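                     /* Each queue address unit is a 16-byte block of internal
                      * packet RAM.  Assuming INTERNAL_MEM_SIZE is 0x400 such
                      * blocks (16 KB total), Tx gets blocks 0x1b4..0x3ff
                      * (588 blocks = 9408 bytes) and Rx gets blocks
                      * 0x000..0x1b3 (436 blocks = 6976 bytes).
                      */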
1726                 writel(0x01b3, &regs->rxq_end_addr);
1727                 writel(0x01b4, &regs->txq_start_addr);
1728         }
1729 
1730         /* Initialize the loopback register. Disable all loopbacks. */
1731         writel(0, &regs->loopback);
1732 
1733         /* MSI Register */
1734         writel(0, &regs->msi_config);
1735 
1736         /* By default, disable the watchdog timer.  It will be enabled when
1737          * a packet is queued.
1738          */
1739         writel(0, &regs->watchdog_timer);
1740 }
1741 
1742 /* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
1743 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1744 {
1745         struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1746         struct rx_ring *rx_local = &adapter->rx_ring;
1747         struct fbr_desc *fbr_entry;
1748         u32 entry;
1749         u32 psr_num_des;
1750         unsigned long flags;
1751         u8 id;
1752 
1753         /* Halt RXDMA to perform the reconfigure.  */
1754         et131x_rx_dma_disable(adapter);
1755 
1756         /* Load the completion writeback physical address */
1757         writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1758         writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1759 
1760         memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1761 
1762         /* Set the address and parameters of the packet status ring into the
1763          * 1310's registers
1764          */
1765         writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1766         writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1767         writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1768         writel(0, &rx_dma->psr_full_offset);
1769 
1770         psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
1771         writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1772                &rx_dma->psr_min_des);
1773 
1774         spin_lock_irqsave(&adapter->rcv_lock, flags);
1775 
1776         /* These local variables track the PSR in the adapter structure */
1777         rx_local->local_psr_full = 0;
1778 
1779         for (id = 0; id < NUM_FBRS; id++) {
1780                 u32 __iomem *num_des;
1781                 u32 __iomem *full_offset;
1782                 u32 __iomem *min_des;
1783                 u32 __iomem *base_hi;
1784                 u32 __iomem *base_lo;
1785                 struct fbr_lookup *fbr = rx_local->fbr[id];
1786 
1787                 if (id == 0) {
1788                         num_des = &rx_dma->fbr0_num_des;
1789                         full_offset = &rx_dma->fbr0_full_offset;
1790                         min_des = &rx_dma->fbr0_min_des;
1791                         base_hi = &rx_dma->fbr0_base_hi;
1792                         base_lo = &rx_dma->fbr0_base_lo;
1793                 } else {
1794                         num_des = &rx_dma->fbr1_num_des;
1795                         full_offset = &rx_dma->fbr1_full_offset;
1796                         min_des = &rx_dma->fbr1_min_des;
1797                         base_hi = &rx_dma->fbr1_base_hi;
1798                         base_lo = &rx_dma->fbr1_base_lo;
1799                 }
1800 
1801                 /* Now's the best time to initialize FBR contents */
1802                 fbr_entry = fbr->ring_virtaddr;
1803                 for (entry = 0; entry < fbr->num_entries; entry++) {
1804                         fbr_entry->addr_hi = fbr->bus_high[entry];
1805                         fbr_entry->addr_lo = fbr->bus_low[entry];
1806                         fbr_entry->word2 = entry;
1807                         fbr_entry++;
1808                 }
1809 
1810                 /* Set the address and parameters of Free buffer ring 1 and 0
1811                  * into the 1310's registers
1812                  */
1813                 writel(upper_32_bits(fbr->ring_physaddr), base_hi);
1814                 writel(lower_32_bits(fbr->ring_physaddr), base_lo);
1815                 writel(fbr->num_entries - 1, num_des);
1816                 writel(ET_DMA10_WRAP, full_offset);
1817 
1818                 /* This variable tracks this free buffer ring's full position,
1819                  * so it has to match the offset written above.
1820                  */
1821                 fbr->local_full = ET_DMA10_WRAP;
1822                 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1823                        min_des);
1824         }
1825 
1826         /* Program the number of packets we will receive before generating an
1827          * interrupt.
1828          * For version B silicon, this value gets updated once autoneg is
1829          * complete.
1830          */
1831         writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1832 
1833         /* The "time_done" is not working correctly to coalesce interrupts
1834          * after a given time period, but rather is giving us an interrupt
1835          * regardless of whether we have received packets.
1836          * This value gets updated once autoneg is complete.
1837          */
1838         writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1839 
1840         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1841 }
1842 
1843 /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1844  *
1845  * Configure the transmit engine with the ring buffers we have created
1846  * and prepare it for use.
1847  */
1848 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1849 {
1850         struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1851         struct tx_ring *tx_ring = &adapter->tx_ring;
1852 
1853         /* Load the hardware with the start of the transmit descriptor ring. */
1854         writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
1855         writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
1856 
1857         /* Initialise the transmit DMA engine */
1858         writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1859 
1860         /* Load the completion writeback physical address */
1861         writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
1862         writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
1863 
1864         *tx_ring->tx_status = 0;
1865 
1866         writel(0, &txdma->service_request);
1867         tx_ring->send_idx = 0;
1868 }
1869 
1870 /* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
1871 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1872 {
1873         /* Configure the JAGCore */
1874         et131x_configure_global_regs(adapter);
1875 
1876         et1310_config_mac_regs1(adapter);
1877 
1878         /* Configure the MMC registers */
1879         /* All we need to do is initialize the Memory Control Register */
1880         writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1881 
1882         et1310_config_rxmac_regs(adapter);
1883         et1310_config_txmac_regs(adapter);
1884 
1885         et131x_config_rx_dma_regs(adapter);
1886         et131x_config_tx_dma_regs(adapter);
1887 
1888         et1310_config_macstat_regs(adapter);
1889 
1890         et1310_phy_power_switch(adapter, 0);
1891         et131x_xcvr_init(adapter);
1892 }
1893 
1894 /* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
1895 static void et131x_soft_reset(struct et131x_adapter *adapter)
1896 {
1897         u32 reg;
1898 
1899         /* Disable MAC Core */
1900         reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
1901               ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1902               ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1903         writel(reg, &adapter->regs->mac.cfg1);
1904 
1905         reg = ET_RESET_ALL;
1906         writel(reg, &adapter->regs->global.sw_reset);
1907 
1908         reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1909               ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1910         writel(reg, &adapter->regs->mac.cfg1);
1911         writel(0, &adapter->regs->mac.cfg1);
1912 }
1913 
1914 /*      et131x_enable_interrupts        -       enable interrupt
1915  *
1916  *      Enable the appropriate interrupts on the ET131x according to our
1917  *      configuration
1918  */
1919 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1920 {
1921         u32 mask;
1922 
1923         /* Enable all global interrupts */
1924         if (adapter->flowcontrol == FLOW_TXONLY ||
1925             adapter->flowcontrol == FLOW_BOTH)
1926                 mask = INT_MASK_ENABLE;
1927         else
1928                 mask = INT_MASK_ENABLE_NO_FLOW;
1929 
1930         writel(mask, &adapter->regs->global.int_mask);
1931 }
1932 
1933 /*      et131x_disable_interrupts       -       interrupt disable
1934  *
1935  *      Block all interrupts from the et131x device at the device itself
1936  */
1937 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
1938 {
1939         /* Disable all global interrupts */
1940         writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
1941 }
1942 
1943 /* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */
1944 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
1945 {
1946         /* Set up the transmit DMA configuration register */
1947         writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
1948                                         &adapter->regs->txdma.csr);
1949 }
1950 
1951 /* et131x_enable_txrx - Enable tx/rx queues */
1952 static void et131x_enable_txrx(struct net_device *netdev)
1953 {
1954         struct et131x_adapter *adapter = netdev_priv(netdev);
1955 
1956         /* Enable the Tx and Rx DMA engines (if not already enabled) */
1957         et131x_rx_dma_enable(adapter);
1958         et131x_tx_dma_enable(adapter);
1959 
1960         /* Enable device interrupts */
1961         if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
1962                 et131x_enable_interrupts(adapter);
1963 
1964         /* We're ready to move some data, so start the queue */
1965         netif_start_queue(netdev);
1966 }
1967 
1968 /* et131x_disable_txrx - Disable tx/rx queues */
1969 static void et131x_disable_txrx(struct net_device *netdev)
1970 {
1971         struct et131x_adapter *adapter = netdev_priv(netdev);
1972 
1973         /* First thing is to stop the queue */
1974         netif_stop_queue(netdev);
1975 
1976         /* Stop the Tx and Rx DMA engines */
1977         et131x_rx_dma_disable(adapter);
1978         et131x_tx_dma_disable(adapter);
1979 
1980         /* Disable device interrupts */
1981         et131x_disable_interrupts(adapter);
1982 }
1983 
1984 /* et131x_init_send - Initialize send data structures */
1985 static void et131x_init_send(struct et131x_adapter *adapter)
1986 {
1987         u32 ct;
1988         struct tx_ring *tx_ring = &adapter->tx_ring;
1989         struct tcb *tcb = tx_ring->tcb_ring;
1990 
1991         tx_ring->tcb_qhead = tcb;
1992 
1993         memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
1994 
1995         /* Go through and set up each TCB */
1996         for (ct = 0; ct++ < NUM_TCB; tcb++)
1997                 /* Set the link pointer in HW TCB to the next TCB in the
1998                  * chain
1999                  */
2000                 tcb->next = tcb + 1;
2001 
2002         /* Set the tail pointer */
2003         tcb--;
2004         tx_ring->tcb_qtail = tcb;
2005         tcb->next = NULL;
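             /* The TCBs now form a singly linked free list:
              * tcb_qhead -> tcb[0] -> ... -> tcb[NUM_TCB - 1] (tcb_qtail) -> NULL
              */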
2006         /* The current send queue should now be empty */
2007         tx_ring->send_head = NULL;
2008         tx_ring->send_tail = NULL;
2009 }
2010 
2011 /* et1310_enable_phy_coma - called when network cable is unplugged
2012  *
2013  * The driver receives a PHY status change interrupt while in D0 and checks
2014  * that phy_status is down.
2015  *
2016  *          -- gate off JAGCore;
2017  *          -- set gigE PHY in Coma mode
2018  *          -- wake on phy_interrupt; perform a software reset of the
2019  *             JAGCore, then re-initialize the JAGCore and gigE PHY
2020  *
2021  *      Add D0-ASPM-PhyLinkDown Support:
2022  *          -- while in D0, when there is a phy_interrupt indicating phy link
2023  *             down status, call the MPSetPhyComa routine to enter this active
2024  *             state power saving mode
2025  *          -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2026  *             indicating linkup status, call the MPDisablePhyComa routine to
2027  *             restore JAGCore and gigE PHY
2028  */
2029 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2030 {
2031         unsigned long flags;
2032         u32 pmcsr;
2033 
2034         pmcsr = readl(&adapter->regs->global.pm_csr);
2035 
2036         /* Save the GbE PHY speed and duplex modes. Need to restore this
2037          * when cable is plugged back in
2038          */
2039 
2040         /* Stop sending packets. */
2041         spin_lock_irqsave(&adapter->send_hw_lock, flags);
2042         adapter->flags |= FMP_ADAPTER_LOWER_POWER;
2043         spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2044 
2045         /* Wait for outstanding Receive packets */
2046 
2047         et131x_disable_txrx(adapter->netdev);
2048 
2049         /* Gate off JAGCore 3 clock domains */
2050         pmcsr &= ~ET_PMCSR_INIT;
2051         writel(pmcsr, &adapter->regs->global.pm_csr);
2052 
2053         /* Program the gigE PHY into Coma mode */
2054         pmcsr |= ET_PM_PHY_SW_COMA;
2055         writel(pmcsr, &adapter->regs->global.pm_csr);
2056 }
2057 
2058 /* et1310_disable_phy_coma - Disable the Phy Coma Mode */
2059 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2060 {
2061         u32 pmcsr;
2062 
2063         pmcsr = readl(&adapter->regs->global.pm_csr);
2064 
2065         /* Disable phy_sw_coma register and re-enable JAGCore clocks */
2066         pmcsr |= ET_PMCSR_INIT;
2067         pmcsr &= ~ET_PM_PHY_SW_COMA;
2068         writel(pmcsr, &adapter->regs->global.pm_csr);
2069 
2070         /* Restore the GbE PHY speed and duplex modes;
2071          * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2072          */
2073 
2074         /* Re-initialize the send structures */
2075         et131x_init_send(adapter);
2076 
2077         /* Bring the device back to the state it was during init prior to
2078          * autonegotiation being complete.  This way, when we get the auto-neg
2079          * complete interrupt, we can complete init by calling ConfigMacREGS2.
2080          */
2081         et131x_soft_reset(adapter);
2082 
2083         /* setup et1310 as per the documentation ?? */
2084         et131x_adapter_setup(adapter);
2085 
2086         /* Allow Tx to restart */
2087         adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
2088 
2089         et131x_enable_txrx(adapter->netdev);
2090 }
2091 
2092 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2093 {
2094         u32 tmp_free_buff_ring = *free_buff_ring;
2095 
2096         tmp_free_buff_ring++;
2097         /* This works for all cases where limit < 1024. The limit == 1023
2098          * case also works: incrementing 1023 gives 1024, so the if below is
2099          * not taken, but the carry into bit 10 has already toggled the wrap
2100          * bit, which is exactly what we want.
2101          */
2102         if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2103                 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2104                 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2105         }
2106         /* For the 1023 case */
2107         tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
2108         *free_buff_ring = tmp_free_buff_ring;
2109         return tmp_free_buff_ring;
2110 }
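     /* A worked example, assuming ET_DMA10_MASK is 0x3ff and ET_DMA10_WRAP is
      * 0x400 (a 10-bit index with the wrap flag in bit 10):
      *
      *   limit = 511, ring value 0x1ff (index 511, wrap 0):
      *     increment -> 0x200; 512 > 511, so the index is cleared and the
      *     wrap bit toggled -> 0x400 (index 0, wrap 1).
      *
      *   limit = 1023, ring value 0x3ff (index 1023, wrap 0):
      *     increment -> 0x400; the if is not taken, but the carry has already
      *     toggled the wrap bit -> 0x400 (index 0, wrap 1).
      */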
2111 
2112 /* et131x_rx_dma_memory_alloc
2113  *
2114  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
2115  * and the Packet Status Ring.
2116  */
2117 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2118 {
2119         u8 id;
2120         u32 i, j;
2121         u32 bufsize;
2122         u32 pktstat_ringsize;
2123         u32 fbr_chunksize;
2124         struct rx_ring *rx_ring = &adapter->rx_ring;
2125         struct fbr_lookup *fbr;
2126 
2127         /* Alloc memory for the lookup table */
2128         rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2129         if (rx_ring->fbr[0] == NULL)
2130                 return -ENOMEM;
2131         rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2132         if (rx_ring->fbr[1] == NULL)
2133                 return -ENOMEM;
2134 
2135         /* The first thing we will do is configure the sizes of the buffer
2136          * rings.  These will change based on jumbo packet support.  FBR1
2137          * holds "large" frames and FBR0 holds "small" frames.  As the jumbo
2138          * packet size grows, each FBR1 entry gets bigger and, for the
2139          * largest sizes, FBR1 ends up with fewer entries.  FBR0 is then
2140          * relied upon to carry more of the "normal" frames, so its entry
2141          * size grows as well (since it now carries "small" + "regular"
2142          * packets).
2143          *
2144          * The Packet Status Ring is sized to cover the combined number of
2145          * FBR0 and FBR1 entries.
2146          */
2152 
2153         if (adapter->registry_jumbo_packet < 2048) {
2154                 rx_ring->fbr[0]->buffsize = 256;
2155                 rx_ring->fbr[0]->num_entries = 512;
2156                 rx_ring->fbr[1]->buffsize = 2048;
2157                 rx_ring->fbr[1]->num_entries = 512;
2158         } else if (adapter->registry_jumbo_packet < 4096) {
2159                 rx_ring->fbr[0]->buffsize = 512;
2160                 rx_ring->fbr[0]->num_entries = 1024;
2161                 rx_ring->fbr[1]->buffsize = 4096;
2162                 rx_ring->fbr[1]->num_entries = 512;
2163         } else {
2164                 rx_ring->fbr[0]->buffsize = 1024;
2165                 rx_ring->fbr[0]->num_entries = 768;
2166                 rx_ring->fbr[1]->buffsize = 16384;
2167                 rx_ring->fbr[1]->num_entries = 128;
2168         }
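             /* Resulting layout (buffer size x number of entries):
              *   jumbo < 2048:  FBR0  256 x 512,  FBR1  2048 x 512
              *   jumbo < 4096:  FBR0  512 x 1024, FBR1  4096 x 512
              *   otherwise:     FBR0 1024 x 768,  FBR1 16384 x 128
              */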
2169 
2170         rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries +
2171                                    rx_ring->fbr[1]->num_entries;
2172 
2173         for (id = 0; id < NUM_FBRS; id++) {
2174                 fbr = rx_ring->fbr[id];
2175                 /* Allocate an area of memory for Free Buffer Ring */
2176                 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2177                 fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2178                                                         bufsize,
2179                                                         &fbr->ring_physaddr,
2180                                                         GFP_KERNEL);
2181                 if (!fbr->ring_virtaddr) {
2182                         dev_err(&adapter->pdev->dev,
2183                            "Cannot alloc memory for Free Buffer Ring %d\n", id);
2184                         return -ENOMEM;
2185                 }
2186         }
2187 
2188         for (id = 0; id < NUM_FBRS; id++) {
2189                 fbr = rx_ring->fbr[id];
2190                 fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
2191 
2192                 for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
2193                         dma_addr_t fbr_tmp_physaddr;
2194 
2195                         fbr->mem_virtaddrs[i] = dma_alloc_coherent(
2196                                         &adapter->pdev->dev, fbr_chunksize,
2197                                         &fbr->mem_physaddrs[i],
2198                                         GFP_KERNEL);
2199 
2200                         if (!fbr->mem_virtaddrs[i]) {
2201                                 dev_err(&adapter->pdev->dev,
2202                                         "Could not alloc memory\n");
2203                                 return -ENOMEM;
2204                         }
2205 
2206                         /* See NOTE in "Save Physical Address" comment above */
2207                         fbr_tmp_physaddr = fbr->mem_physaddrs[i];
2208 
2209                         for (j = 0; j < FBR_CHUNKS; j++) {
2210                                 u32 index = (i * FBR_CHUNKS) + j;
2211 
2212                                 /* Save the Virtual address of this index for
2213                                  * quick access later
2214                                  */
2215                                 fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] +
2216                                                    (j * fbr->buffsize);
2217 
2218                                 /* now store the physical address in the
2219                                  * descriptor so the device can access it
2220                                  */
2221                                 fbr->bus_high[index] =
2222                                                 upper_32_bits(fbr_tmp_physaddr);
2223                                 fbr->bus_low[index] =
2224                                                 lower_32_bits(fbr_tmp_physaddr);
2225 
2226                                 fbr_tmp_physaddr += fbr->buffsize;
2227                         }
2228                 }
2229         }
2230 
2231         /* Allocate an area of memory for FIFO of Packet Status ring entries */
2232         pktstat_ringsize =
2233                 sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries;
2234 
2235         rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2236                                                   pktstat_ringsize,
2237                                                   &rx_ring->ps_ring_physaddr,
2238                                                   GFP_KERNEL);
2239 
2240         if (!rx_ring->ps_ring_virtaddr) {
2241                 dev_err(&adapter->pdev->dev,
2242                           "Cannot alloc memory for Packet Status Ring\n");
2243                 return -ENOMEM;
2244         }
2245 
2246         /* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2247          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2248          * are ever returned, make sure the high part is retrieved here before
2249          * storing the adjusted address.
2250          */
2251 
2252         /* Allocate an area of memory for writeback of status information */
2253         rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2254                                             sizeof(struct rx_status_block),
2255                                             &rx_ring->rx_status_bus,
2256                                             GFP_KERNEL);
2257         if (!rx_ring->rx_status_block) {
2258                 dev_err(&adapter->pdev->dev,
2259                           "Cannot alloc memory for Status Block\n");
2260                 return -ENOMEM;
2261         }
2262         rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2263 
2264         /* The RFDs are going to be put on lists later on, so initialize the
2265          * lists now.
2266          */
2267         INIT_LIST_HEAD(&rx_ring->recv_list);
2268         return 0;
2269 }
2270 
2271 /* et131x_rx_dma_memory_free - Free all memory allocated within this module */
2272 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2273 {
2274         u8 id;
2275         u32 index;
2276         u32 bufsize;
2277         u32 pktstat_ringsize;
2278         struct rfd *rfd;
2279         struct rx_ring *rx_ring = &adapter->rx_ring;
2280         struct fbr_lookup *fbr;
2281 
2282         /* Free RFDs and associated packet descriptors */
2283         WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2284 
2285         while (!list_empty(&rx_ring->recv_list)) {
2286                 rfd = list_entry(rx_ring->recv_list.next,
2287                                  struct rfd, list_node);
2288 
2289                 list_del(&rfd->list_node);
2290                 rfd->skb = NULL;
2291                 kfree(rfd);
2292         }
2293 
2294         /* Free Free Buffer Rings */
2295         for (id = 0; id < NUM_FBRS; id++) {
2296                 fbr = rx_ring->fbr[id];
2297 
2298                 if (!fbr || !fbr->ring_virtaddr)
2299                         continue;
2300 
2301                 /* First the packet memory */
2302                 for (index = 0;
2303                      index < fbr->num_entries / FBR_CHUNKS;
2304                      index++) {
2305                         if (fbr->mem_virtaddrs[index]) {
2306                                 bufsize = fbr->buffsize * FBR_CHUNKS;
2307 
2308                                 dma_free_coherent(&adapter->pdev->dev,
2309                                                   bufsize,
2310                                                   fbr->mem_virtaddrs[index],
2311                                                   fbr->mem_physaddrs[index]);
2312 
2313                                 fbr->mem_virtaddrs[index] = NULL;
2314                         }
2315                 }
2316 
2317                 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2318 
2319                 dma_free_coherent(&adapter->pdev->dev,
2320                                   bufsize,
2321                                   fbr->ring_virtaddr,
2322                                   fbr->ring_physaddr);
2323 
2324                 fbr->ring_virtaddr = NULL;
2325         }
2326 
2327         /* Free Packet Status Ring */
2328         if (rx_ring->ps_ring_virtaddr) {
2329                 pktstat_ringsize = sizeof(struct pkt_stat_desc) *
2330                                         rx_ring->psr_num_entries;
2331 
2332                 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2333                                     rx_ring->ps_ring_virtaddr,
2334                                     rx_ring->ps_ring_physaddr);
2335 
2336                 rx_ring->ps_ring_virtaddr = NULL;
2337         }
2338 
2339         /* Free area of memory for the writeback of status information */
2340         if (rx_ring->rx_status_block) {
2341                 dma_free_coherent(&adapter->pdev->dev,
2342                         sizeof(struct rx_status_block),
2343                         rx_ring->rx_status_block, rx_ring->rx_status_bus);
2344                 rx_ring->rx_status_block = NULL;
2345         }
2346 
2347         /* Free the FBR Lookup Table */
2348         kfree(rx_ring->fbr[0]);
2349         kfree(rx_ring->fbr[1]);
2350 
2351         /* Reset Counters */
2352         rx_ring->num_ready_recv = 0;
2353 }
2354 
2355 /* et131x_init_recv - Initialize receive data structures */
2356 static int et131x_init_recv(struct et131x_adapter *adapter)
2357 {
2358         struct rfd *rfd;
2359         u32 rfdct;
2360         struct rx_ring *rx_ring = &adapter->rx_ring;
2361 
2362         /* Setup each RFD */
2363         for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2364                 rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
2365                 if (!rfd)
2366                         return -ENOMEM;
2367 
2368                 rfd->skb = NULL;
2369 
2370                 /* Add this RFD to the recv_list */
2371                 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2372 
2373                 /* Increment the available RFD's */
2374                 rx_ring->num_ready_recv++;
2375         }
2376 
2377         return 0;
2378 }
2379 
2380 /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
2381 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2382 {
2383         struct phy_device *phydev = adapter->phydev;
2384 
2385         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2386          * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
2387          */
2388         if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2389                 writel(0, &adapter->regs->rxdma.max_pkt_time);
2390                 writel(1, &adapter->regs->rxdma.num_pkt_done);
2391         }
2392 }
2393 
2394 /* nic_return_rfd - Recycle an RFD and put it back onto the receive list
2395  * @adapter: pointer to our adapter
2396  * @rfd: pointer to the RFD
2397  */
2398 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2399 {
2400         struct rx_ring *rx_local = &adapter->rx_ring;
2401         struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2402         u16 buff_index = rfd->bufferindex;
2403         u8 ring_index = rfd->ringindex;
2404         unsigned long flags;
2405         struct fbr_lookup *fbr = rx_local->fbr[ring_index];
2406 
2407         /* We don't use any of the OOB data besides status. Otherwise, we
2408          * need to clean up OOB data
2409          */
2410         if (buff_index < fbr->num_entries) {
2411                 u32 free_buff_ring;
2412                 u32 __iomem *offset;
2413                 struct fbr_desc *next;
2414 
2415                 spin_lock_irqsave(&adapter->fbr_lock, flags);
2416 
2417                 if (ring_index == 0)
2418                         offset = &rx_dma->fbr0_full_offset;
2419                 else
2420                         offset = &rx_dma->fbr1_full_offset;
2421 
2422                 next = (struct fbr_desc *)(fbr->ring_virtaddr) +
2423                        INDEX10(fbr->local_full);
2424 
2425                 /* Handle the Free Buffer Ring advancement here. Write
2426                  * the PA / Buffer Index for the returned buffer into
2427                  * the oldest (next to be freed) FBR entry
2428                  */
2429                 next->addr_hi = fbr->bus_high[buff_index];
2430                 next->addr_lo = fbr->bus_low[buff_index];
2431                 next->word2 = buff_index;
2432 
2433                 free_buff_ring = bump_free_buff_ring(&fbr->local_full,
2434                                                      fbr->num_entries - 1);
2435                 writel(free_buff_ring, offset);
2436 
2437                 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2438         } else {
2439                 dev_err(&adapter->pdev->dev,
2440                           "%s illegal Buffer Index returned\n", __func__);
2441         }
2442 
2443         /* The processing on this RFD is done, so put it back on the tail of
2444          * our list
2445          */
2446         spin_lock_irqsave(&adapter->rcv_lock, flags);
2447         list_add_tail(&rfd->list_node, &rx_local->recv_list);
2448         rx_local->num_ready_recv++;
2449         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2450 
2451         WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2452 }
2453 
2454 /* nic_rx_pkts - Checks the hardware for available packets
2455  *
2456  * Returns rfd, a pointer to our MPRFD.
2457  *
2458  * Checks the hardware for available packets, using completion ring
2459  * If packets are available, it gets an RFD from the recv_list, attaches
2460  * the packet to it, puts the RFD in the RecvPendList, and also returns
2461  * the pointer to the RFD.
2462  */
2463 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2464 {
2465         struct rx_ring *rx_local = &adapter->rx_ring;
2466         struct rx_status_block *status;
2467         struct pkt_stat_desc *psr;
2468         struct rfd *rfd;
2469         u32 i;
2470         u8 *buf;
2471         unsigned long flags;
2472         struct list_head *element;
2473         u8 ring_index;
2474         u16 buff_index;
2475         u32 len;
2476         u32 word0;
2477         u32 word1;
2478         struct sk_buff *skb;
2479         struct fbr_lookup *fbr;
2480 
2481         /* RX Status block is written by the DMA engine prior to every
2482          * interrupt. It contains the next to be used entry in the Packet
2483          * Status Ring, and also the two Free Buffer rings.
2484          */
2485         status = rx_local->rx_status_block;
2486         word1 = status->word1 >> 16;    /* Get the useful bits */
2487 
2488         /* Nothing new if the PSR offset and wrap bit match our local copy */
2489         if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2490                 return NULL; /* Looks like this ring is not updated yet */
2491 
2492         /* The packet status ring indicates that data is available. */
2493         psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2494                         (rx_local->local_psr_full & 0xFFF);
2495 
2496         /* Grab any information that is required once the PSR is advanced,
2497          * since we can no longer rely on the memory being accurate
2498          */
2499         len = psr->word1 & 0xFFFF;
2500         ring_index = (psr->word1 >> 26) & 0x03;
2501         fbr = rx_local->fbr[ring_index];
2502         buff_index = (psr->word1 >> 16) & 0x3FF;
2503         word0 = psr->word0;
2504 
2505         /* Indicate that we have used this PSR entry. */
2506         /* FIXME wrap 12 */
2507         add_12bit(&rx_local->local_psr_full, 1);
2508         if ((rx_local->local_psr_full & 0xFFF) >
2509             rx_local->psr_num_entries - 1) {
2510                 /* Clear psr full and toggle the wrap bit */
2511                 rx_local->local_psr_full &=  ~0xFFF;
2512                 rx_local->local_psr_full ^= 0x1000;
2513         }
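             /* local_psr_full uses the same index-plus-wrap encoding as the
              * free buffer rings, but with a 12-bit index (0xFFF) and the wrap
              * flag in bit 12 (0x1000).
              */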
2514 
2515         writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2516 
2517         if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
2518                 /* Illegal buffer or ring index cannot be used by S/W*/
2519                 dev_err(&adapter->pdev->dev,
2520                         "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2521                         rx_local->local_psr_full & 0xFFF, len, buff_index);
2522                 return NULL;
2523         }
2524 
2525         /* Get and fill the RFD. */
2526         spin_lock_irqsave(&adapter->rcv_lock, flags);
2527 
2528         element = rx_local->recv_list.next;
2529         rfd = list_entry(element, struct rfd, list_node);
2530 
2531         if (!rfd) {
2532                 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2533                 return NULL;
2534         }
2535 
2536         list_del(&rfd->list_node);
2537         rx_local->num_ready_recv--;
2538 
2539         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2540 
2541         rfd->bufferindex = buff_index;
2542         rfd->ringindex = ring_index;
2543 
2544         /* In V1 silicon, there is a bug which screws up filtering of runt
2545          * packets. Therefore runt packet filtering is disabled in the MAC and
2546          * the packets are dropped here. They are also counted here.
2547          */
2548         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2549                 adapter->stats.rx_other_errs++;
2550                 len = 0;
2551         }
2552 
2553         if (len == 0) {
2554                 rfd->len = 0;
2555                 goto out;
2556         }
2557 
2558         /* Determine if this is a multicast packet coming in */
2559         if ((word0 & ALCATEL_MULTICAST_PKT) &&
2560             !(word0 & ALCATEL_BROADCAST_PKT)) {
2561                 /* Promiscuous mode and Multicast mode are not mutually
2562                  * exclusive as was first thought. I guess Promiscuous is just
2563                  * considered a super-set of the other filters. Generally filter
2564                  * is 0x2b when in promiscuous mode.
2565                  */
2566                 if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST)
2567                    && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
2568                    && !(adapter->packet_filter &
2569                                         ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2570                         buf = fbr->virt[buff_index];
2571 
2572                         /* Loop through our list to see if the destination
2573                          * address of this packet matches one in our list.
2574                          */
2575                         for (i = 0; i < adapter->multicast_addr_count; i++) {
2576                                 if (buf[0] == adapter->multicast_list[i][0]
2577                                  && buf[1] == adapter->multicast_list[i][1]
2578                                  && buf[2] == adapter->multicast_list[i][2]
2579                                  && buf[3] == adapter->multicast_list[i][3]
2580                                  && buf[4] == adapter->multicast_list[i][4]
2581                                  && buf[5] == adapter->multicast_list[i][5]) {
2582                                         break;
2583                                 }
2584                         }
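                             /* This open-coded compare is equivalent to the
                              * ether_addr_equal() helper from
                              * <linux/etherdevice.h> on kernels that provide
                              * it.
                              */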
2585 
2586                         /* If our index is equal to the number of Multicast
2587                          * address we have, then this means we did not find this
2588                          * packet's matching address in our list. Set the len to
2589                          * zero, so we free our RFD when we return from this
2590                          * function.
2591                          */
2592                         if (i == adapter->multicast_addr_count)
2593                                 len = 0;
2594                 }
2595 
2596                 if (len > 0)
2597                         adapter->stats.multicast_pkts_rcvd++;
2598         } else if (word0 & ALCATEL_BROADCAST_PKT) {
2599                 adapter->stats.broadcast_pkts_rcvd++;
2600         } else {
2601                 /* Not sure what this counter measures in promiscuous mode.
2602                  * Perhaps we should check the MAC address to see if it is
2603                  * directed to us in promiscuous mode.
2604                  */
2605                 adapter->stats.unicast_pkts_rcvd++;
2606         }
2607 
2608         if (!len) {
2609                 rfd->len = 0;
2610                 goto out;
2611         }
2612 
2613         rfd->len = len;
2614 
2615         skb = dev_alloc_skb(rfd->len + 2);
2616         if (!skb) {
2617                 dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
2618                 return NULL;
2619         }
2620 
2621         adapter->net_stats.rx_bytes += rfd->len;
2622 
2623         memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
2624 
2625         skb->protocol = eth_type_trans(skb, adapter->netdev);
2626         skb->ip_summed = CHECKSUM_NONE;
2627         netif_rx_ni(skb);
2628 
2629 out:
2630         nic_return_rfd(adapter, rfd);
2631         return rfd;
2632 }
2633 
2634 /* et131x_handle_recv_interrupt - Interrupt handler for receive processing
2635  *
2636  * Assumption, Rcv spinlock has been acquired.
2637  */
2638 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2639 {
2640         struct rfd *rfd = NULL;
2641         u32 count = 0;
2642         bool done = true;
2643         struct rx_ring *rx_ring = &adapter->rx_ring;
2644 
2645         /* Process up to NUM_PACKETS_HANDLED of the available RFDs */
2646         while (count < NUM_PACKETS_HANDLED) {
2647                 if (list_empty(&rx_ring->recv_list)) {
2648                         WARN_ON(rx_ring->num_ready_recv != 0);
2649                         done = false;
2650                         break;
2651                 }
2652 
2653                 rfd = nic_rx_pkts(adapter);
2654 
2655                 if (rfd == NULL)
2656                         break;
2657 
2658                 /* Do not receive any packets until a filter has been set.
2659                  * Do not receive any packets until we have link.
2660                  * If length is zero, return the RFD in order to advance the
2661                  * Free buffer ring.
2662                  */
2663                 if (!adapter->packet_filter ||
2664                     !netif_carrier_ok(adapter->netdev) ||
2665                     rfd->len == 0)
2666                         continue;
2667 
2668                 /* Increment the number of packets we received */
2669                 adapter->net_stats.rx_packets++;
2670 
2671                 /* Set the status on the packet, either resources or success */
2672                 if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
2673                         dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
2674 
2675                 count++;
2676         }
2677 
2678         if (count == NUM_PACKETS_HANDLED || !done) {
2679                 rx_ring->unfinished_receives = true;
2680                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2681                        &adapter->regs->global.watchdog_timer);
2682         } else
2683                 /* Watchdog timer will disable itself if appropriate. */
2684                 rx_ring->unfinished_receives = false;
2685 }
2686 
2687 /* et131x_tx_dma_memory_alloc
2688  *
2689  * Allocates memory that will be visible both to the device and to the CPU.
2690  * The OS will pass us packets, pointers to which we will insert in the Tx
2691  * Descriptor queue. The device will read this queue to find the packets in
2692  * memory. The device will update the "status" in memory each time it xmits a
2693  * packet.
2694  */
2695 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2696 {
2697         int desc_size = 0;
2698         struct tx_ring *tx_ring = &adapter->tx_ring;
2699 
2700         /* Allocate memory for the TCB's (Transmit Control Block) */
2701         tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2702                                     GFP_ATOMIC | GFP_DMA);
2703         if (!tx_ring->tcb_ring)
2704                 return -ENOMEM;
2705 
2706         desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2707         tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2708                                                    desc_size,
2709                                                    &tx_ring->tx_desc_ring_pa,
2710                                                    GFP_KERNEL);
2711         if (!tx_ring->tx_desc_ring) {
2712                 dev_err(&adapter->pdev->dev,
2713                         "Cannot alloc memory for Tx Ring\n");
2714                 return -ENOMEM;
2715         }
2716 
2717         /* Save physical address
2718          *
2719          * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2720          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2721          * are ever returned, make sure the high part is retrieved here before
2722          * storing the adjusted address.
2723          */
2724         /* Allocate memory for the Tx status block */
2725         tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2726                                                     sizeof(u32),
2727                                                     &tx_ring->tx_status_pa,
2728                                                     GFP_KERNEL);
2729         if (!tx_ring->tx_status) {
2730                 dev_err(&adapter->pdev->dev,
2731                         "Cannot alloc memory for Tx status block\n");
2732                 return -ENOMEM;
2733         }
2734         return 0;
2735 }
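     /* NOTE: on an allocation failure above, the partially allocated Tx
      * memory (tcb_ring, tx_desc_ring) is not freed here; the caller,
      * et131x_adapter_memory_alloc(), invokes et131x_tx_dma_memory_free(),
      * which copes with NULL / partial allocations.
      */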
2736 
2737 /* et131x_tx_dma_memory_free - Free all memory allocated within this module */
2738 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2739 {
2740         int desc_size = 0;
2741         struct tx_ring *tx_ring = &adapter->tx_ring;
2742 
2743         if (tx_ring->tx_desc_ring) {
2744                 /* Free memory relating to Tx rings here */
2745                 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2746                 dma_free_coherent(&adapter->pdev->dev,
2747                                   desc_size,
2748                                   tx_ring->tx_desc_ring,
2749                                   tx_ring->tx_desc_ring_pa);
2750                 tx_ring->tx_desc_ring = NULL;
2751         }
2752 
2753         /* Free memory for the Tx status block */
2754         if (tx_ring->tx_status) {
2755                 dma_free_coherent(&adapter->pdev->dev,
2756                                   sizeof(u32),
2757                                   tx_ring->tx_status,
2758                                   tx_ring->tx_status_pa);
2759 
2760                 tx_ring->tx_status = NULL;
2761         }
2762         /* Free the memory for the tcb structures */
2763         kfree(tx_ring->tcb_ring);
2764 }
2765 
2766 /* nic_send_packet - NIC specific send handler for version B silicon.
2767  * @adapter: pointer to our adapter
2768  * @tcb: pointer to struct tcb
2769  */
2770 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2771 {
2772         u32 i;
2773         struct tx_desc desc[24];        /* 24 x 16 byte */
2774         u32 frag = 0;
2775         u32 thiscopy, remainder;
2776         struct sk_buff *skb = tcb->skb;
2777         u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2778         struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
2779         unsigned long flags;
2780         struct phy_device *phydev = adapter->phydev;
2781         dma_addr_t dma_addr;
2782         struct tx_ring *tx_ring = &adapter->tx_ring;
2783 
2784         /* Part of the optimizations of this send routine restrict us to
2785          * sending 24 fragments at a pass.  In practice we should never see
2786          * more than 5 fragments.
2787          *
2788          * NOTE: The older version of this function (below) can handle any
2789          * number of fragments. If needed, we can call this function,
2790          * although it is less efficient.
2791          */
2792 
2793         /* nr_frags should be no more than 18. */
2794         BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
2795 
2796         memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2797 
2798         for (i = 0; i < nr_frags; i++) {
2799                 /* If there is something in this element, lets get a
2800                  * descriptor from the ring and get the necessary data
2801                  */
2802                 if (i == 0) {
2803                         /* If the fragments are smaller than a standard MTU,
2804                          * then map them to a single descriptor in the Tx
2805                          * Desc ring. However, if they're larger, as is
2806                          * possible with support for jumbo packets, then
2807                          * split them each across 2 descriptors.
2808                          *
2809                          * This will work until we determine why the hardware
2810                          * doesn't seem to like large fragments.
2811                          */
2812                         if (skb_headlen(skb) <= 1514) {
2813                                 /* Low 16bits are length, high is vlan and
2814                                  * unused currently so zero
2815                                  */
2816                                 desc[frag].len_vlan = skb_headlen(skb);
2817                                 dma_addr = dma_map_single(&adapter->pdev->dev,
2818                                                           skb->data,
2819                                                           skb_headlen(skb),
2820                                                           DMA_TO_DEVICE);
2821                                 desc[frag].addr_lo = lower_32_bits(dma_addr);
2822                                 desc[frag].addr_hi = upper_32_bits(dma_addr);
2823                                 frag++;
2824                         } else {
2825                                 desc[frag].len_vlan = skb_headlen(skb) / 2;
2826                                 dma_addr = dma_map_single(&adapter->pdev->dev,
2827                                                          skb->data,
2828                                                          (skb_headlen(skb) / 2),
2829                                                          DMA_TO_DEVICE);
2830                                 desc[frag].addr_lo = lower_32_bits(dma_addr);
2831                                 desc[frag].addr_hi = upper_32_bits(dma_addr);
2832                                 frag++;
2833 
2834                                 desc[frag].len_vlan = skb_headlen(skb) / 2;
2835                                 dma_addr = dma_map_single(&adapter->pdev->dev,
2836                                                          skb->data +
2837                                                          (skb_headlen(skb) / 2),
2838                                                          (skb_headlen(skb) / 2),
2839                                                          DMA_TO_DEVICE);
2840                                 desc[frag].addr_lo = lower_32_bits(dma_addr);
2841                                 desc[frag].addr_hi = upper_32_bits(dma_addr);
2842                                 frag++;
2843                         }
2844                 } else {
2845                         desc[frag].len_vlan = frags[i - 1].size;
2846                         dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2847                                                     &frags[i - 1],
2848                                                     0,
2849                                                     frags[i - 1].size,
2850                                                     DMA_TO_DEVICE);
2851                         desc[frag].addr_lo = lower_32_bits(dma_addr);
2852                         desc[frag].addr_hi = upper_32_bits(dma_addr);
2853                         frag++;
2854                 }
2855         }
2856 
2857         if (phydev && phydev->speed == SPEED_1000) {
2858                 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
2859                         /* Last element & Interrupt flag */
2860                         desc[frag - 1].flags =
2861                                     TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2862                         tx_ring->since_irq = 0;
2863                 } else { /* Last element */
2864                         desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2865                 }
2866         } else
2867                 desc[frag - 1].flags =
2868                                     TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2869 
2870         desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2871 
2872         tcb->index_start = tx_ring->send_idx;
2873         tcb->stale = 0;
2874 
2875         spin_lock_irqsave(&adapter->send_hw_lock, flags);
2876 
2877         thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2878 
2879         if (thiscopy >= frag) {
2880                 remainder = 0;
2881                 thiscopy = frag;
2882         } else {
2883                 remainder = frag - thiscopy;
2884         }
2885 
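             /* Copy the scratch descriptors into the ring in up to two chunks:
              * the block starting at send_idx may straddle the end of the
              * descriptor ring, in which case the remainder wraps around to
              * the start of the ring.
              */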
2886         memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2887                desc,
2888                sizeof(struct tx_desc) * thiscopy);
2889 
2890         add_10bit(&tx_ring->send_idx, thiscopy);
2891 
2892         if (INDEX10(tx_ring->send_idx) == 0 ||
2893                   INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2894                 tx_ring->send_idx &= ~ET_DMA10_MASK;
2895                 tx_ring->send_idx ^= ET_DMA10_WRAP;
2896         }
2897 
2898         if (remainder) {
2899                 memcpy(tx_ring->tx_desc_ring,
2900                        desc + thiscopy,
2901                        sizeof(struct tx_desc) * remainder);
2902 
2903                 add_10bit(&tx_ring->send_idx, remainder);
2904         }
2905 
2906         if (INDEX10(tx_ring->send_idx) == 0) {
2907                 if (tx_ring->send_idx)
2908                         tcb->index = NUM_DESC_PER_RING_TX - 1;
2909                 else
2910                         tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2911         } else
2912                 tcb->index = tx_ring->send_idx - 1;
2913 
2914         spin_lock(&adapter->tcb_send_qlock);
2915 
2916         if (tx_ring->send_tail)
2917                 tx_ring->send_tail->next = tcb;
2918         else
2919                 tx_ring->send_head = tcb;
2920 
2921         tx_ring->send_tail = tcb;
2922 
2923         WARN_ON(tcb->next != NULL);
2924 
2925         tx_ring->used++;
2926 
2927         spin_unlock(&adapter->tcb_send_qlock);
2928 
2929         /* Write the new write pointer back to the device. */
2930         writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
2931 
2932         /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
2933          * timer to wake us up if this packet isn't followed by N more.
2934          */
2935         if (phydev && phydev->speed == SPEED_1000) {
2936                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2937                        &adapter->regs->global.watchdog_timer);
2938         }
2939         spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2940 
2941         return 0;
2942 }
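     /* NOTE: send_idx and tcb->index use a compact encoding: the low-order
      * bits selected by INDEX10()/ET_DMA10_MASK are the offset into the
      * descriptor ring, and ET_DMA10_WRAP is a wrap indicator toggled each
      * time the index passes the end of the ring.  The send completion
      * handler XORs this bit with the hardware's new_service_complete value
      * to decide whether a TCB's descriptors have already been consumed.
      */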
2943 
2944 /* send_packet - Do the work to send a packet
2945  *
2946  * Assumption: Send spinlock has been acquired
2947  */
2948 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2949 {
2950         int status;
2951         struct tcb *tcb;
2952         u16 *shbufva;
2953         unsigned long flags;
2954         struct tx_ring *tx_ring = &adapter->tx_ring;
2955 
2956         /* All packets must have at least a MAC address and a protocol type */
2957         if (skb->len < ETH_HLEN)
2958                 return -EIO;
2959 
2960         /* Get a TCB for this packet */
2961         spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2962 
2963         tcb = tx_ring->tcb_qhead;
2964 
2965         if (tcb == NULL) {
2966                 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2967                 return -ENOMEM;
2968         }
2969 
2970         tx_ring->tcb_qhead = tcb->next;
2971 
2972         if (tx_ring->tcb_qhead == NULL)
2973                 tx_ring->tcb_qtail = NULL;
2974 
2975         spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2976 
2977         tcb->skb = skb;
2978 
2979         if (skb->data != NULL && skb_headlen(skb) >= 6) {
2980                 shbufva = (u16 *) skb->data;
2981 
2982                 if ((shbufva[0] == 0xffff) &&
2983                     (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff))
2984                         tcb->flags |= FMP_DEST_BROAD;
2985                 else if ((shbufva[0] & 0x3) == 0x0001)
2986                         tcb->flags |=  FMP_DEST_MULTI;
2987         }
2988 
2989         tcb->next = NULL;
2990 
2991         /* Call the NIC specific send handler. */
2992         status = nic_send_packet(adapter, tcb);
2993 
2994         if (status != 0) {
2995                 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2996 
2997                 if (tx_ring->tcb_qtail)
2998                         tx_ring->tcb_qtail->next = tcb;
2999                 else
3000                         /* Apparently ready Q is empty. */
3001                         tx_ring->tcb_qhead = tcb;
3002 
3003                 tx_ring->tcb_qtail = tcb;
3004                 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3005                 return status;
3006         }
3007         WARN_ON(tx_ring->used > NUM_TCB);
3008         return 0;
3009 }
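     /* NOTE: TCB lifecycle as used above: a free TCB is popped from the
      * tcb_qhead ready list (under tcb_ready_qlock), attached to the skb and
      * handed to nic_send_packet(), which appends it to the send_head/
      * send_tail in-flight list (under tcb_send_qlock); free_send_packet()
      * later unmaps the fragments and returns the TCB to the ready list.
      */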
3010 
3011 /* et131x_send_packets - This function is called by the OS to send packets */
3012 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3013 {
3014         int status = 0;
3015         struct et131x_adapter *adapter = netdev_priv(netdev);
3016         struct tx_ring *tx_ring = &adapter->tx_ring;
3017 
3018         /* Send these packets
3019          *
3020          * NOTE: The Linux Tx entry point is only given one packet at a time
3021          * to Tx, so the PacketCount and its array are not used here
3022          */
3023 
3024         /* TCB is not available */
3025         if (tx_ring->used >= NUM_TCB) {
3026                 /* NOTE: If there's an error on send, no need to queue the
3027                  * packet under Linux; if we just send an error up to the
3028                  * netif layer, it will resend the skb to us.
3029                  */
3030                 status = -ENOMEM;
3031         } else {
3032                 /* We need to see if the link is up; if it's not, make the
3033                  * netif layer think we're good and drop the packet
3034                  */
3035                 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3036                                         !netif_carrier_ok(netdev)) {
3037                         dev_kfree_skb_any(skb);
3038                         skb = NULL;
3039 
3040                         adapter->net_stats.tx_dropped++;
3041                 } else {
3042                         status = send_packet(skb, adapter);
3043                         if (status != 0 && status != -ENOMEM) {
3044                                 /* On any other error, make netif think we're
3045                                  * OK and drop the packet
3046                                  */
3047                                 dev_kfree_skb_any(skb);
3048                                 skb = NULL;
3049                                 adapter->net_stats.tx_dropped++;
3050                         }
3051                 }
3052         }
3053         return status;
3054 }
3055 
3056 /* free_send_packet - Recycle a struct tcb
3057  * @adapter: pointer to our adapter
3058  * @tcb: pointer to struct tcb
3059  *
3060  * Complete the packet if necessary
3061  * Assumption - Send spinlock has been acquired
3062  */
3063 static inline void free_send_packet(struct et131x_adapter *adapter,
3064                                                 struct tcb *tcb)
3065 {
3066         unsigned long flags;
3067         struct tx_desc *desc = NULL;
3068         struct net_device_stats *stats = &adapter->net_stats;
3069         struct tx_ring *tx_ring = &adapter->tx_ring;
3070         u64  dma_addr;
3071 
3072         if (tcb->flags & FMP_DEST_BROAD)
3073                 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3074         else if (tcb->flags & FMP_DEST_MULTI)
3075                 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3076         else
3077                 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3078 
3079         if (tcb->skb) {
3080                 stats->tx_bytes += tcb->skb->len;
3081 
3082                 /* Iterate through the TX descriptors on the ring
3083                  * corresponding to this packet and unmap the fragments
3084                  * they point to
3085                  */
3086                 do {
3087                         desc = tx_ring->tx_desc_ring +
3088                                INDEX10(tcb->index_start);
3089 
3090                         dma_addr = desc->addr_lo;
3091                         dma_addr |= (u64)desc->addr_hi << 32;
3092 
3093                         dma_unmap_single(&adapter->pdev->dev,
3094                                          dma_addr,
3095                                          desc->len_vlan, DMA_TO_DEVICE);
3096 
3097                         add_10bit(&tcb->index_start, 1);
3098                         if (INDEX10(tcb->index_start) >=
3099                                                         NUM_DESC_PER_RING_TX) {
3100                                 tcb->index_start &= ~ET_DMA10_MASK;
3101                                 tcb->index_start ^= ET_DMA10_WRAP;
3102                         }
3103                 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
3104 
3105                 dev_kfree_skb_any(tcb->skb);
3106         }
3107 
3108         memset(tcb, 0, sizeof(struct tcb));
3109 
3110         /* Add the TCB to the Ready Q */
3111         spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3112 
3113         adapter->net_stats.tx_packets++;
3114 
3115         if (tx_ring->tcb_qtail)
3116                 tx_ring->tcb_qtail->next = tcb;
3117         else
3118                 /* Apparently ready Q is empty. */
3119                 tx_ring->tcb_qhead = tcb;
3120 
3121         tx_ring->tcb_qtail = tcb;
3122 
3123         spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3124         WARN_ON(tx_ring->used < 0);
3125 }
3126 
3127 /* et131x_free_busy_send_packets - Free and complete the stopped active sends
3128  *
3129  * Assumption - Send spinlock has been acquired
3130  */
3131 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3132 {
3133         struct tcb *tcb;
3134         unsigned long flags;
3135         u32 freed = 0;
3136         struct tx_ring *tx_ring = &adapter->tx_ring;
3137 
3138         /* Any packets being sent? Check the first TCB on the send list */
3139         spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3140 
3141         tcb = tx_ring->send_head;
3142 
3143         while (tcb != NULL && freed < NUM_TCB) {
3144                 struct tcb *next = tcb->next;
3145 
3146                 tx_ring->send_head = next;
3147 
3148                 if (next == NULL)
3149                         tx_ring->send_tail = NULL;
3150 
3151                 tx_ring->used--;
3152 
3153                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3154 
3155                 freed++;
3156                 free_send_packet(adapter, tcb);
3157 
3158                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3159 
3160                 tcb = tx_ring->send_head;
3161         }
3162 
3163         WARN_ON(freed == NUM_TCB);
3164 
3165         spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3166 
3167         tx_ring->used = 0;
3168 }
3169 
3170 /* et131x_handle_send_interrupt - Interrupt handler for sending processing
3171  *
3172  * Re-claim the send resources, complete sends and get more to send from
3173  * the send wait queue.
3174  *
3175  * Assumption - Send spinlock has been acquired
3176  */
3177 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3178 {
3179         unsigned long flags;
3180         u32 serviced;
3181         struct tcb *tcb;
3182         u32 index;
3183         struct tx_ring *tx_ring = &adapter->tx_ring;
3184 
3185         serviced = readl(&adapter->regs->txdma.new_service_complete);
3186         index = INDEX10(serviced);
3187 
3188         /* Has the ring wrapped?  Process any descriptors that do not have
3189          * the same "wrap" indicator as the current completion indicator
3190          */
3191         spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3192 
3193         tcb = tx_ring->send_head;
3194 
3195         while (tcb &&
3196                ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3197                index < INDEX10(tcb->index)) {
3198                 tx_ring->used--;
3199                 tx_ring->send_head = tcb->next;
3200                 if (tcb->next == NULL)
3201                         tx_ring->send_tail = NULL;
3202 
3203                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3204                 free_send_packet(adapter, tcb);
3205                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3206 
3207                 /* Go to the next packet */
3208                 tcb = tx_ring->send_head;
3209         }
3210         while (tcb &&
3211                !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3212                && index > (tcb->index & ET_DMA10_MASK)) {
3213                 tx_ring->used--;
3214                 tx_ring->send_head = tcb->next;
3215                 if (tcb->next == NULL)
3216                         tx_ring->send_tail = NULL;
3217 
3218                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3219                 free_send_packet(adapter, tcb);
3220                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3221 
3222                 /* Go to the next packet */
3223                 tcb = tx_ring->send_head;
3224         }
3225 
3226         /* Wake up the queue when we hit a low-water mark */
3227         if (tx_ring->used <= NUM_TCB / 3)
3228                 netif_wake_queue(adapter->netdev);
3229 
3230         spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3231 }
3232 
3233 static int et131x_get_settings(struct net_device *netdev,
3234                                struct ethtool_cmd *cmd)
3235 {
3236         struct et131x_adapter *adapter = netdev_priv(netdev);
3237 
3238         return phy_ethtool_gset(adapter->phydev, cmd);
3239 }
3240 
3241 static int et131x_set_settings(struct net_device *netdev,
3242                                struct ethtool_cmd *cmd)
3243 {
3244         struct et131x_adapter *adapter = netdev_priv(netdev);
3245 
3246         return phy_ethtool_sset(adapter->phydev, cmd);
3247 }
3248 
3249 static int et131x_get_regs_len(struct net_device *netdev)
3250 {
3251 #define ET131X_REGS_LEN 256
3252         return ET131X_REGS_LEN * sizeof(u32);
3253 }
3254 
3255 static void et131x_get_regs(struct net_device *netdev,
3256                             struct ethtool_regs *regs, void *regs_data)
3257 {
3258         struct et131x_adapter *adapter = netdev_priv(netdev);
3259         struct address_map __iomem *aregs = adapter->regs;
3260         u32 *regs_buff = regs_data;
3261         u32 num = 0;
3262         u16 tmp;
3263 
3264         memset(regs_data, 0, et131x_get_regs_len(netdev));
3265 
3266         regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3267                         adapter->pdev->device;
3268 
3269         /* PHY regs */
3270         et131x_mii_read(adapter, MII_BMCR, &tmp);
3271         regs_buff[num++] = tmp;
3272         et131x_mii_read(adapter, MII_BMSR, &tmp);
3273         regs_buff[num++] = tmp;
3274         et131x_mii_read(adapter, MII_PHYSID1, &tmp);
3275         regs_buff[num++] = tmp;
3276         et131x_mii_read(adapter, MII_PHYSID2, &tmp);
3277         regs_buff[num++] = tmp;
3278         et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
3279         regs_buff[num++] = tmp;
3280         et131x_mii_read(adapter, MII_LPA, &tmp);
3281         regs_buff[num++] = tmp;
3282         et131x_mii_read(adapter, MII_EXPANSION, &tmp);
3283         regs_buff[num++] = tmp;
3284         /* Autoneg next page transmit reg */
3285         et131x_mii_read(adapter, 0x07, &tmp);
3286         regs_buff[num++] = tmp;
3287         /* Link partner next page reg */
3288         et131x_mii_read(adapter, 0x08, &tmp);
3289         regs_buff[num++] = tmp;
3290         et131x_mii_read(adapter, MII_CTRL1000, &tmp);
3291         regs_buff[num++] = tmp;
3292         et131x_mii_read(adapter, MII_STAT1000, &tmp);
3293         regs_buff[num++] = tmp;
3294         et131x_mii_read(adapter, 0x0b, &tmp);
3295         regs_buff[num++] = tmp;
3296         et131x_mii_read(adapter, 0x0c, &tmp);
3297         regs_buff[num++] = tmp;
3298         et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
3299         regs_buff[num++] = tmp;
3300         et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
3301         regs_buff[num++] = tmp;
3302         et131x_mii_read(adapter, MII_ESTATUS, &tmp);
3303         regs_buff[num++] = tmp;
3304 
3305         et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
3306         regs_buff[num++] = tmp;
3307         et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
3308         regs_buff[num++] = tmp;
3309         et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
3310         regs_buff[num++] = tmp;
3311         et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
3312         regs_buff[num++] = tmp;
3313         et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
3314         regs_buff[num++] = tmp;
3315 
3316         et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
3317         regs_buff[num++] = tmp;
3318         et131x_mii_read(adapter, PHY_CONFIG, &tmp);
3319         regs_buff[num++] = tmp;
3320         et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
3321         regs_buff[num++] = tmp;
3322         et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
3323         regs_buff[num++] = tmp;
3324         et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
3325         regs_buff[num++] = tmp;
3326         et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
3327         regs_buff[num++] = tmp;
3328         et131x_mii_read(adapter, PHY_LED_1, &tmp);
3329         regs_buff[num++] = tmp;
3330         et131x_mii_read(adapter, PHY_LED_2, &tmp);
3331         regs_buff[num++] = tmp;
3332 
3333         /* Global regs */
3334         regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3335         regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3336         regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3337         regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3338         regs_buff[num++] = readl(&aregs->global.pm_csr);
3339         regs_buff[num++] = adapter->stats.interrupt_status;
3340         regs_buff[num++] = readl(&aregs->global.int_mask);
3341         regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3342         regs_buff[num++] = readl(&aregs->global.int_status_alias);
3343         regs_buff[num++] = readl(&aregs->global.sw_reset);
3344         regs_buff[num++] = readl(&aregs->global.slv_timer);
3345         regs_buff[num++] = readl(&aregs->global.msi_config);
3346         regs_buff[num++] = readl(&aregs->global.loopback);
3347         regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3348 
3349         /* TXDMA regs */
3350         regs_buff[num++] = readl(&aregs->txdma.csr);
3351         regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3352         regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3353         regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3354         regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3355         regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3356         regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3357         regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3358         regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3359         regs_buff[num++] = readl(&aregs->txdma.service_request);
3360         regs_buff[num++] = readl(&aregs->txdma.service_complete);
3361         regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3362         regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3363         regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3364         regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3365         regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3366         regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3367         regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3368         regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3369         regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3370         regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3371         regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3372         regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3373         regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3374         regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3375         regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3376 
3377         /* RXDMA regs */
3378         regs_buff[num++] = readl(&aregs->rxdma.csr);
3379         regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3380         regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3381         regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3382         regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3383         regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3384         regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3385         regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3386         regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3387         regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3388         regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3389         regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3390         regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3391         regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3392         regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3393         regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3394         regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3395         regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3396         regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3397         regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3398         regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3399         regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3400         regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3401         regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3402         regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3403         regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3404         regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3405         regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3406         regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3407 }
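     /* NOTE: et131x_get_regs() fills regs_buff[] sequentially without a bounds
      * check, so the PHY, global, TXDMA and RXDMA dumps above must stay within
      * the ET131X_REGS_LEN (256) u32 entries advertised by
      * et131x_get_regs_len().
      */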
3408 
3409 static void et131x_get_drvinfo(struct net_device *netdev,
3410                                struct ethtool_drvinfo *info)
3411 {
3412         struct et131x_adapter *adapter = netdev_priv(netdev);
3413 
3414         strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3415         strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3416         strlcpy(info->bus_info, pci_name(adapter->pdev),
3417                 sizeof(info->bus_info));
3418 }
3419 
3420 static const struct ethtool_ops et131x_ethtool_ops = {
3421         .get_settings   = et131x_get_settings,
3422         .set_settings   = et131x_set_settings,
3423         .get_drvinfo    = et131x_get_drvinfo,
3424         .get_regs_len   = et131x_get_regs_len,
3425         .get_regs       = et131x_get_regs,
3426         .get_link       = ethtool_op_get_link,
3427 };
3428 
3429 /* et131x_hwaddr_init - set up the MAC Address on the ET1310 */
3430 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3431 {
3432         /* If we have our default MAC from init and no MAC address from
3433          * the EEPROM, then we need to generate the last octet and set it
3434          * on the device
3435          */
3436         if (is_zero_ether_addr(adapter->rom_addr)) {
3437                 /* We need to randomly generate the last octet so we
3438                  * decrease our chances of setting the MAC address to the
3439                  * same as another one of our cards in the system
3440                  */
3441                 get_random_bytes(&adapter->addr[5], 1);
3442                 /* We have the default value in the register we are
3443                  * working with so we need to copy the current
3444                  * address into the permanent address
3445                  */
3446                 memcpy(adapter->rom_addr,
3447                         adapter->addr, ETH_ALEN);
3448         } else {
3449                 /* We already have a permanent (ROM) address, so set the
3450                  * current address to the permanent address and use
3451                  * it on the device
3452                  */
3453                 memcpy(adapter->addr,
3454                        adapter->rom_addr, ETH_ALEN);
3455         }
3456 }
3457 
3458 /* et131x_pci_init       - initial PCI setup
3459  *
3460  * Perform the initial setup of PCI registers and if possible initialise
3461  * the MAC address. At this point the I/O registers have yet to be mapped
3462  */
3463 static int et131x_pci_init(struct et131x_adapter *adapter,
3464                            struct pci_dev *pdev)
3465 {
3466         u16 max_payload;
3467         int i, rc;
3468 
3469         rc = et131x_init_eeprom(adapter);
3470         if (rc < 0)
3471                 goto out;
3472 
3473         if (!pci_is_pcie(pdev)) {
3474                 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3475                 goto err_out;
3476         }
3477 
3478         /* Let's set up the PORT LOGIC Register. */
3479 
3480         /* Program the Ack/Nak latency and replay timers */
3481         max_payload = pdev->pcie_mpss;
3482 
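             /* pcie_mpss holds the device's Max_Payload_Size_Supported
              * encoding (payload = 128 << pcie_mpss), so indices 0 and 1 below
              * correspond to 128- and 256-byte payloads; larger sizes keep the
              * hardware default timer values.
              */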
3483         if (max_payload < 2) {
3484                 static const u16 acknak[2] = { 0x76, 0xD0 };
3485                 static const u16 replay[2] = { 0x1E0, 0x2ED };
3486 
3487                 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3488                                                acknak[max_payload])) {
3489                         dev_err(&pdev->dev,
3490                           "Could not write PCI config space for ACK/NAK\n");
3491                         goto err_out;
3492                 }
3493                 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3494                                                replay[max_payload])) {
3495                         dev_err(&pdev->dev,
3496                           "Could not write PCI config space for Replay Timer\n");
3497                         goto err_out;
3498                 }
3499         }
3500 
3501         /* l0s and l1 latency timers.  We are using default values.
3502          * Representing 001 for L0s and 010 for L1
3503          */
3504         if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3505                 dev_err(&pdev->dev,
3506                   "Could not write PCI config space for Latency Timers\n");
3507                 goto err_out;
3508         }
3509 
3510         /* Change the max read size to 2k */
3511         if (pcie_set_readrq(pdev, 2048)) {
3512                 dev_err(&pdev->dev,
3513                         "Couldn't change PCI config space for Max read size\n");
3514                 goto err_out;
3515         }
3516 
3517         /* Get MAC address from config space if an eeprom exists, otherwise
3518          * the MAC address there will not be valid
3519          */
3520         if (!adapter->has_eeprom) {
3521                 et131x_hwaddr_init(adapter);
3522                 return 0;
3523         }
3524 
3525         for (i = 0; i < ETH_ALEN; i++) {
3526                 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3527                                         adapter->rom_addr + i)) {
3528                         dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3529                         goto err_out;
3530                 }
3531         }
3532         ether_addr_copy(adapter->addr, adapter->rom_addr);
3533 out:
3534         return rc;
3535 err_out:
3536         rc = -EIO;
3537         goto out;
3538 }
3539 
3540 /* et131x_error_timer_handler
3541  * @data: timer-specific variable; here a pointer to our adapter structure
3542  *
3543  * The routine called when the error timer expires, to track the number of
3544  * recurring errors.
3545  */
3546 static void et131x_error_timer_handler(unsigned long data)
3547 {
3548         struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3549         struct phy_device *phydev = adapter->phydev;
3550 
3551         if (et1310_in_phy_coma(adapter)) {
3552                 /* Bring the device immediately out of coma to
3553                  * prevent it from sleeping indefinitely; this
3554                  * mechanism could be improved!
3555                  */
3556                 et1310_disable_phy_coma(adapter);
3557                 adapter->boot_coma = 20;
3558         } else {
3559                 et1310_update_macstat_host_counters(adapter);
3560         }
3561 
3562         if (!phydev->link && adapter->boot_coma < 11)
3563                 adapter->boot_coma++;
3564 
3565         if (adapter->boot_coma == 10) {
3566                 if (!phydev->link) {
3567                         if (!et1310_in_phy_coma(adapter)) {
3568                                 /* NOTE - This was originally a 'sync with
3569                                  *  interrupt'. How to do that under Linux?
3570                                  */
3571                                 et131x_enable_interrupts(adapter);
3572                                 et1310_enable_phy_coma(adapter);
3573                         }
3574                 }
3575         }
3576 
3577         /* This is a periodic timer, so reschedule */
3578         mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3579 }
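     /* NOTE: boot_coma acts as a link-down tick counter for this timer: while
      * there is no link it counts up and, on reaching 10 with the link still
      * down, the PHY is placed in coma to save power.  Setting it to 20 (on
      * coma exit above, and on link-up in et131x_adjust_link()) parks the
      * counter so coma is not immediately re-entered; a link-down transition
      * resets it to 0.
      */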
3580 
3581 /* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */
3582 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3583 {
3584         et131x_tx_dma_memory_free(adapter);
3585         et131x_rx_dma_memory_free(adapter);
3586 }
3587 
3588 /* et131x_adapter_memory_alloc
3589  * Allocate all the memory blocks for send, receive and others.
3590  */
3591 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3592 {
3593         int status;
3594 
3595         /* Allocate memory for the Tx Ring */
3596         status = et131x_tx_dma_memory_alloc(adapter);
3597         if (status) {
3598                 dev_err(&adapter->pdev->dev,
3599                           "et131x_tx_dma_memory_alloc FAILED\n");
3600                 et131x_tx_dma_memory_free(adapter);
3601                 return status;
3602         }
3603         /* Receive buffer memory allocation */
3604         status = et131x_rx_dma_memory_alloc(adapter);
3605         if (status) {
3606                 dev_err(&adapter->pdev->dev,
3607                           "et131x_rx_dma_memory_alloc FAILED\n");
3608                 et131x_adapter_memory_free(adapter);
3609                 return status;
3610         }
3611 
3612         /* Init receive data structures */
3613         status = et131x_init_recv(adapter);
3614         if (status) {
3615                 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3616                 et131x_adapter_memory_free(adapter);
3617         }
3618         return status;
3619 }
3620 
3621 static void et131x_adjust_link(struct net_device *netdev)
3622 {
3623         struct et131x_adapter *adapter = netdev_priv(netdev);
3624         struct  phy_device *phydev = adapter->phydev;
3625 
3626         if (!phydev)
3627                 return;
3628         if (phydev->link == adapter->link)
3629                 return;
3630 
3631         /* Check to see if we are in coma mode and if
3632          * so, disable it because we will not be able
3633          * to read PHY values until we are out.
3634          */
3635         if (et1310_in_phy_coma(adapter))
3636                 et1310_disable_phy_coma(adapter);
3637 
3638         adapter->link = phydev->link;
3639         phy_print_status(phydev);
3640 
3641         if (phydev->link) {
3642                 adapter->boot_coma = 20;
3643                 if (phydev->speed == SPEED_10) {
3644                         u16 register18;
3645 
3646                         et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3647                                          &register18);
3648                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3649                                          register18 | 0x4);
3650                         et131x_mii_write(adapter, PHY_INDEX_REG,
3651                                          register18 | 0x8402);
3652                         et131x_mii_write(adapter, PHY_DATA_REG,
3653                                          register18 | 511);
3654                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3655                                          register18);
3656                 }
3657 
3658                 et1310_config_flow_control(adapter);
3659 
3660                 if (phydev->speed == SPEED_1000 &&
3661                     adapter->registry_jumbo_packet > 2048) {
3662                         u16 reg;
3663 
3664                         et131x_mii_read(adapter, PHY_CONFIG, &reg);
3665                         reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3666                         reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3667                         et131x_mii_write(adapter, PHY_CONFIG, reg);
3668                 }
3669 
3670                 et131x_set_rx_dma_timer(adapter);
3671                 et1310_config_mac_regs2(adapter);
3672         } else {
3673                 adapter->boot_coma = 0;
3674 
3675                 if (phydev->speed == SPEED_10) {
3676                         u16 register18;
3677 
3678                         et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3679                                          &register18);
3680                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3681                                          register18 | 0x4);
3682                         et131x_mii_write(adapter, PHY_INDEX_REG,
3683                                          register18 | 0x8402);
3684                         et131x_mii_write(adapter, PHY_DATA_REG,
3685                                          register18 | 511);
3686                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3687                                          register18);
3688                 }
3689 
3690                 /* Free the packets being actively sent & stopped */
3691                 et131x_free_busy_send_packets(adapter);
3692 
3693                 /* Re-initialize the send structures */
3694                 et131x_init_send(adapter);
3695 
3696                 /* Bring the device back to the state it was during
3697                  * init prior to autonegotiation being complete. This
3698                  * way, when we get the auto-neg complete interrupt,
3699                  * we can complete init by calling config_mac_regs2.
3700                  */
3701                 et131x_soft_reset(adapter);
3702 
3703                 /* Setup ET1310 as per the documentation */
3704                 et131x_adapter_setup(adapter);
3705 
3706                 /* perform reset of tx/rx */
3707                 et131x_disable_txrx(netdev);
3708                 et131x_enable_txrx(netdev);
3709         }
3710 }
3711 
3712 static int et131x_mii_probe(struct net_device *netdev)
3713 {
3714         struct et131x_adapter *adapter = netdev_priv(netdev);
3715         struct  phy_device *phydev = NULL;
3716 
3717         phydev = phy_find_first(adapter->mii_bus);
3718         if (!phydev) {
3719                 dev_err(&adapter->pdev->dev, "no PHY found\n");
3720                 return -ENODEV;
3721         }
3722 
3723         phydev = phy_connect(netdev, dev_name(&phydev->dev),
3724                              &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3725 
3726         if (IS_ERR(phydev)) {
3727                 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3728                 return PTR_ERR(phydev);
3729         }
3730 
3731         phydev->supported &= (SUPPORTED_10baseT_Half
3732                                 | SUPPORTED_10baseT_Full
3733                                 | SUPPORTED_100baseT_Half
3734                                 | SUPPORTED_100baseT_Full
3735                                 | SUPPORTED_Autoneg
3736                                 | SUPPORTED_MII
3737                                 | SUPPORTED_TP);
3738 
3739         if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3740                 phydev->supported |= SUPPORTED_1000baseT_Full;
3741 
3742         phydev->advertising = phydev->supported;
3743         adapter->phydev = phydev;
3744 
3745         dev_info(&adapter->pdev->dev,
3746                  "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3747                  phydev->drv->name, dev_name(&phydev->dev));
3748 
3749         return 0;
3750 }
3751 
3752 /* et131x_adapter_init
3753  *
3754  * Initialize the data structures for the et131x_adapter object and link
3755  * them together with the platform provided device structures.
3756  */
3757 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3758                                                   struct pci_dev *pdev)
3759 {
3760         static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3761 
3762         struct et131x_adapter *adapter;
3763 
3764         /* Allocate private adapter struct and copy in relevant information */
3765         adapter = netdev_priv(netdev);
3766         adapter->pdev = pci_dev_get(pdev);
3767         adapter->netdev = netdev;
3768 
3769         /* Initialize spinlocks here */
3770         spin_lock_init(&adapter->tcb_send_qlock);
3771         spin_lock_init(&adapter->tcb_ready_qlock);
3772         spin_lock_init(&adapter->send_hw_lock);
3773         spin_lock_init(&adapter->rcv_lock);
3774         spin_lock_init(&adapter->fbr_lock);
3775 
3776         adapter->registry_jumbo_packet = 1514;  /* 1514-9216 */
3777 
3778         /* Set the MAC address to a default */
3779         ether_addr_copy(adapter->addr, default_mac);
3780 
3781         return adapter;
3782 }
3783 
3784 /* et131x_pci_remove
3785  *
3786  * Registered in the pci_driver structure, this function is called when the
3787  * PCI subsystem detects that a PCI device which matches the information
3788  * contained in the pci_device_id table has been removed.
3789  */
3790 static void et131x_pci_remove(struct pci_dev *pdev)
3791 {
3792         struct net_device *netdev = pci_get_drvdata(pdev);
3793         struct et131x_adapter *adapter = netdev_priv(netdev);
3794 
3795         unregister_netdev(netdev);
3796         phy_disconnect(adapter->phydev);
3797         mdiobus_unregister(adapter->mii_bus);
3798         cancel_work_sync(&adapter->task);
3799         kfree(adapter->mii_bus->irq);
3800         mdiobus_free(adapter->mii_bus);
3801 
3802         et131x_adapter_memory_free(adapter);
3803         iounmap(adapter->regs);
3804         pci_dev_put(pdev);
3805 
3806         free_netdev(netdev);
3807         pci_release_regions(pdev);
3808         pci_disable_device(pdev);
3809 }
3810 
3811 /* et131x_up - Bring up a device for use.  */
3812 static void et131x_up(struct net_device *netdev)
3813 {
3814         struct et131x_adapter *adapter = netdev_priv(netdev);
3815 
3816         et131x_enable_txrx(netdev);
3817         phy_start(adapter->phydev);
3818 }
3819 
3820 /* et131x_down - Bring down the device */
3821 static void et131x_down(struct net_device *netdev)
3822 {
3823         struct et131x_adapter *adapter = netdev_priv(netdev);
3824 
3825         /* Save the timestamp for the TX watchdog, prevent a timeout */
3826         netdev->trans_start = jiffies;
3827 
3828         phy_stop(adapter->phydev);
3829         et131x_disable_txrx(netdev);
3830 }
3831 
3832 #ifdef CONFIG_PM_SLEEP
3833 static int et131x_suspend(struct device *dev)
3834 {
3835         struct pci_dev *pdev = to_pci_dev(dev);
3836         struct net_device *netdev = pci_get_drvdata(pdev);
3837 
3838         if (netif_running(netdev)) {
3839                 netif_device_detach(netdev);
3840                 et131x_down(netdev);
3841                 pci_save_state(pdev);
3842         }
3843 
3844         return 0;
3845 }
3846 
3847 static int et131x_resume(struct device *dev)
3848 {
3849         struct pci_dev *pdev = to_pci_dev(dev);
3850         struct net_device *netdev = pci_get_drvdata(pdev);
3851 
3852         if (netif_running(netdev)) {
3853                 pci_restore_state(pdev);
3854                 et131x_up(netdev);
3855                 netif_device_attach(netdev);
3856         }
3857 
3858         return 0;
3859 }
3860 
3861 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3862 #define ET131X_PM_OPS (&et131x_pm_ops)
3863 #else
3864 #define ET131X_PM_OPS NULL
3865 #endif
3866 
3867 /* et131x_isr - The Interrupt Service Routine for the driver.
3868  * @irq: the IRQ on which the interrupt was received.
3869  * @dev_id: device-specific info (here a pointer to a net_device struct)
3870  *
3871  * Returns a value indicating if the interrupt was handled.
3872  */
3873 static irqreturn_t et131x_isr(int irq, void *dev_id)
3874 {
3875         bool handled = true;
3876         struct net_device *netdev = (struct net_device *)dev_id;
3877         struct et131x_adapter *adapter = netdev_priv(netdev);
3878         struct rx_ring *rx_ring = &adapter->rx_ring;
3879         struct tx_ring *tx_ring = &adapter->tx_ring;
3880         u32 status;
3881 
3882         if (!netif_device_present(netdev)) {
3883                 handled = false;
3884                 goto out;
3885         }
3886 
3887         /* If the adapter is in low power state, then it should not
3888          * recognize any interrupt
3889          */
3890 
3891         /* Disable Device Interrupts */
3892         et131x_disable_interrupts(adapter);
3893 
3894         /* Get a copy of the value in the interrupt status register
3895          * so we can process the interrupting section
3896          */
3897         status = readl(&adapter->regs->global.int_status);
3898 
3899         if (adapter->flowcontrol == FLOW_TXONLY ||
3900             adapter->flowcontrol == FLOW_BOTH) {
3901                 status &= ~INT_MASK_ENABLE;
3902         } else {
3903                 status &= ~INT_MASK_ENABLE_NO_FLOW;
3904         }
3905 
3906         /* Make sure this is our interrupt */
3907         if (!status) {
3908                 handled = false;
3909                 et131x_enable_interrupts(adapter);
3910                 goto out;
3911         }
3912 
3913         /* This is our interrupt, so process accordingly */
3914 
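             /* The watchdog interrupt mops up deferred work: a stale TCB at
              * the head of the send list is treated as a pending Tx
              * completion, unfinished receives are treated as a pending Rx
              * transfer-done, and the timer is switched off only when the
              * send list is empty and no receives are pending.
              */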
3915         if (status & ET_INTR_WATCHDOG) {
3916                 struct tcb *tcb = tx_ring->send_head;
3917 
3918                 if (tcb)
3919                         if (++tcb->stale > 1)
3920                                 status |= ET_INTR_TXDMA_ISR;
3921 
3922                 if (rx_ring->unfinished_receives)
3923                         status |= ET_INTR_RXDMA_XFR_DONE;
3924                 else if (tcb == NULL)
3925                         writel(0, &adapter->regs->global.watchdog_timer);
3926 
3927                 status &= ~ET_INTR_WATCHDOG;
3928         }
3929 
3930         if (!status) {
3931                 /* This interrupt has in some way been "handled" by
3932                  * the ISR. Either it was a spurious Rx interrupt, or
3933                  * it was a Tx interrupt that has been filtered by
3934                  * the ISR.
3935                  */
3936                 et131x_enable_interrupts(adapter);
3937                 goto out;
3938         }
3939 
3940         /* We need to save the interrupt status value for use in our
3941          * DPC. We will clear the software copy of that in that
3942          * routine.
3943          */
3944         adapter->stats.interrupt_status = status;
3945 
3946         /* Schedule the ISR handler as a bottom-half task in the
3947          * kernel's tq_immediate queue, and mark the queue for
3948          * execution
3949          */
3950         schedule_work(&adapter->task);
3951 out:
3952         return IRQ_RETVAL(handled);
3953 }
3954 
3955 /* et131x_isr_handler - The ISR handler
3956  *
3957  * scheduled to run in a deferred context by the ISR. This is where the ISR's
3958  * work actually gets done.
3959  */
3960 static void et131x_isr_handler(struct work_struct *work)
3961 {
3962         struct et131x_adapter *adapter =
3963                 container_of(work, struct et131x_adapter, task);
3964         u32 status = adapter->stats.interrupt_status;
3965         struct address_map __iomem *iomem = adapter->regs;
3966 
3967         /* These first two are by far the most common.  Once handled, we clear
3968          * their two bits in the status word.  If the word is now zero, we
3969          * exit.
3970          */
3971         /* Handle all the completed Transmit interrupts */
3972         if (status & ET_INTR_TXDMA_ISR)
3973                 et131x_handle_send_interrupt(adapter);
3974 
3975         /* Handle all the completed Receives interrupts */
3976         if (status & ET_INTR_RXDMA_XFR_DONE)
3977                 et131x_handle_recv_interrupt(adapter);
3978 
3979         status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3980 
3981         if (!status)
3982                 goto out;
3983 
3984         /* Handle the TXDMA Error interrupt */
3985         if (status & ET_INTR_TXDMA_ERR) {
3986                 /* Following read also clears the register (COR) */
3987                 u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3988 
3989                 dev_warn(&adapter->pdev->dev,
3990                             "TXDMA_ERR interrupt, error = %d\n",
3991                             txdma_err);
3992         }
3993 
3994         /* Handle Free Buffer Ring 0 and 1 Low interrupt */
3995         if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3996                 /* This indicates the number of unused buffers in RXDMA free
3997                  * buffer ring 0 is <= the limit you programmed. Free buffer
3998                  * resources need to be returned.  Free buffers are consumed as
3999                  * packets are passed from the network to the host. The host
4000                  * becomes aware of the packets from the contents of the packet
4001                  * status ring. This ring is queried when the packet done
4002                  * interrupt occurs. Packets are then passed to the OS. When
4003                  * the OS is done with the packets the resources can be
4004                  * returned to the ET1310 for re-use. This interrupt is one
4005                  * method of returning resources.
4006                  */
4007 
4008                 /*  If the user has flow control on, then we will
4009                  * send a pause packet, otherwise just exit
4010                  */
4011                 if (adapter->flowcontrol == FLOW_TXONLY ||
4012                     adapter->flowcontrol == FLOW_BOTH) {
4013                         u32 pm_csr;
4014 
4015                         /* Tell the device to send a pause packet via the back
4016                          * pressure register (bp req and bp xon/xoff)
4017                          */
4018                         pm_csr = readl(&iomem->global.pm_csr);
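                             /* NOTE: pm_csr is read here but its value is
                              * never used below; et1310_in_phy_coma()
                              * presumably checks the PM CSR itself.
                              */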
4019                         if (!et1310_in_phy_coma(adapter))
4020                                 writel(3, &iomem->txmac.bp_ctrl);
4021                 }
4022         }
4023 
4024         /* Handle Packet Status Ring Low Interrupt */
4025         if (status & ET_INTR_RXDMA_STAT_LOW) {
4026                 /* Same idea as with the two Free Buffer Rings. Packets going
4027                  * from the network to the host each consume a free buffer
4028                  * resource and a packet status resource. These resources are
4029                  * passed to the OS. When the OS is done with the resources,
4030                  * they need to be returned to the ET1310. This is one method
4031                  * of returning the resources.
4032                  */
4033         }
4034 
4035         /* Handle RXDMA Error Interrupt */
4036         if (status & ET_INTR_RXDMA_ERR) {
4037                 /* The rxdma_error interrupt is sent when a time-out on a
4038                  * request issued by the JAGCore has occurred or a completion is
4039                  * returned with an unsuccessful status. In both cases the
4040                  * request is considered complete. The JAGCore will
4041                  * automatically re-try the request in question. Normally
4042                  * information on events like these are sent to the host using
4043                  * the "Advanced Error Reporting" capability. This interrupt is
4044                  * another way of getting similar information. The only thing
4045                  * required is to clear the interrupt by reading the ISR in the
4046                  * global resources. The JAGCore will do a re-try on the
4047                  * request. Normally you should never see this interrupt. If
4048                  * you start to see this interrupt occurring frequently then
4049                  * something bad has occurred. A reset might be the thing to do.
4050                  */
4051                 /* TRAP();*/
4052 
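                     /* FIXME: the comment above describes clearing the
                      * condition by reading the global ISR, but the value
                      * logged below is read from txmac.tx_test.
                      */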
4053                 dev_warn(&adapter->pdev->dev,
4054                             "RxDMA_ERR interrupt, error %x\n",
4055                             readl(&iomem->txmac.tx_test));
4056         }
4057 
4058         /* Handle the Wake on LAN Event */
4059         if (status & ET_INTR_WOL) {
4060                 /* This is a secondary interrupt for wake on LAN. The driver
4061                  * should never see this; if it does, something serious is
4062                  * wrong. All we can usefully do here is log an error
4063                  * message.
4064                  */
4065                 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4066         }
4067 
4068         /* Let's move on to the TxMac */
4069         if (status & ET_INTR_TXMAC) {
4070                 u32 err = readl(&iomem->txmac.err);
4071 
4072                 /* When any of the errors occur and TXMAC generates an
4073                  * interrupt to report these errors, it usually means that
4074                  * TXMAC has detected an error in the data stream retrieved
4075                  * from the on-chip Tx Q. All of these errors are catastrophic
4076                  * and TXMAC won't be able to recover data when these errors
4077                  * occur. In a nutshell, the whole Tx path will have to be reset
4078                  * and re-configured afterwards.
4079                  */
4080                 dev_warn(&adapter->pdev->dev,
4081                          "TXMAC interrupt, error 0x%08x\n",
4082                          err);
4083 
4084                 /* If we are debugging, we want to see this error, otherwise we
4085                  * just want the device to be reset and continue
4086                  */
4087         }
4088 
4089         /* Handle RXMAC Interrupt */
4090         if (status & ET_INTR_RXMAC) {
4091                 /* These interrupts are catastrophic to the device, what we need
4092                  * to do is disable the interrupts and set the flag to cause us
4093                  * to reset so we can solve this issue.
4094                  */
4095                 /* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */
4096 
4097                 dev_warn(&adapter->pdev->dev,
4098                          "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
4099                          readl(&iomem->rxmac.err_reg));
4100 
4101                 dev_warn(&adapter->pdev->dev,
4102                          "Enable 0x%08x, Diag 0x%08x\n",
4103                          readl(&iomem->rxmac.ctrl),
4104                          readl(&iomem->rxmac.rxq_diag));
4105 
4106                 /* If we are debugging, we want to see this error, otherwise we
4107                  * just want the device to be reset and continue
4108                  */
4109         }
4110 
4111         /* Handle MAC_STAT Interrupt */
4112         if (status & ET_INTR_MAC_STAT) {
4113                 /* This means at least one of the un-masked counters in the
4114                  * MAC_STAT block has rolled over. Use this to maintain the top,
4115                  * software managed bits of the counter(s).
4116                  */
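                     /* As a sketch of the idea: when one of these hardware
                      * counters wraps, the counter's full range (e.g.
                      * 1ULL << 32 for a 32-bit counter) is added to the wider
                      * software-maintained total; the actual bookkeeping
                      * lives in et1310_handle_macstat_interrupt().
                      */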
4117                 et1310_handle_macstat_interrupt(adapter);
4118         }
4119 
4120         /* Handle SLV Timeout Interrupt */
4121         if (status & ET_INTR_SLV_TIMEOUT) {
4122                 /* This means a timeout has occurred on a read or write request
4123                  * to one of the JAGCore registers. The Global Resources block
4124                  * has terminated the request and on a read request, returned a
4125                  * "fake" value. The most likely reasons are: Bad Address or the
4126                  * addressed module is in a power-down state and can't respond.
4127                  */
4128         }
4129 out:
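             /* Re-enable device interrupts; they are presumably masked in
              * et131x_isr() before this work item is scheduled.
              */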
4130         et131x_enable_interrupts(adapter);
4131 }
4132 
4133 /* et131x_stats - Return the current device statistics  */
4134 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4135 {
4136         struct et131x_adapter *adapter = netdev_priv(netdev);
4137         struct net_device_stats *stats = &adapter->net_stats;
4138         struct ce_stats *devstat = &adapter->stats;
4139 
4140         stats->rx_errors = devstat->rx_length_errs +
4141                            devstat->rx_align_errs +
4142                            devstat->rx_crc_errs +
4143                            devstat->rx_code_violations +
4144                            devstat->rx_other_errs;
4145         stats->tx_errors = devstat->tx_max_pkt_errs;
4146         stats->multicast = devstat->multicast_pkts_rcvd;
4147         stats->collisions = devstat->tx_collisions;
4148 
4149         stats->rx_length_errors = devstat->rx_length_errs;
4150         stats->rx_over_errors = devstat->rx_overflows;
4151         stats->rx_crc_errors = devstat->rx_crc_errs;
4152 
4153         /* NOTE: These stats don't have corresponding values in CE_STATS,
4154          * so we're going to have to update these directly from within the
4155          * TX/RX code
4156          */
4157         /* stats->rx_bytes            = 20; devstat->; */
4158         /* stats->tx_bytes            = 20;  devstat->; */
4159         /* stats->rx_dropped          = devstat->; */
4160         /* stats->tx_dropped          = devstat->; */
4161 
4162         /*  NOTE: Not used, can't find analogous statistics */
4163         /* stats->rx_frame_errors     = devstat->; */
4164         /* stats->rx_fifo_errors      = devstat->; */
4165         /* stats->rx_missed_errors    = devstat->; */
4166 
4167         /* stats->tx_aborted_errors   = devstat->; */
4168         /* stats->tx_carrier_errors   = devstat->; */
4169         /* stats->tx_fifo_errors      = devstat->; */
4170         /* stats->tx_heartbeat_errors = devstat->; */
4171         /* stats->tx_window_errors    = devstat->; */
4172         return stats;
4173 }
4174 
4175 /* et131x_open - Open the device for use.  */
4176 static int et131x_open(struct net_device *netdev)
4177 {
4178         struct et131x_adapter *adapter = netdev_priv(netdev);
4179         struct pci_dev *pdev = adapter->pdev;
4180         unsigned int irq = pdev->irq;
4181         int result;
4182 
4183         /* Start the timer to track NIC errors */
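             /* TX_ERROR_PERIOD is presumably expressed in milliseconds;
              * multiplying by HZ and dividing by 1000 converts it to jiffies.
              */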
4184         init_timer(&adapter->error_timer);
4185         adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4186         adapter->error_timer.function = et131x_error_timer_handler;
4187         adapter->error_timer.data = (unsigned long)adapter;
4188         add_timer(&adapter->error_timer);
4189 
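             /* The interrupt line may be shared with other PCI devices, so
              * et131x_isr() must check whether this device actually raised
              * the interrupt and return IRQ_NONE when it did not.
              */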
4190         result = request_irq(irq, et131x_isr,
4191                              IRQF_SHARED, netdev->name, netdev);
4192         if (result) {
4193                 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4194                 return result;
4195         }
4196 
4197         adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
4198 
4199         et131x_up(netdev);
4200 
4201         return result;
4202 }
4203 
4204 /* et131x_close - Close the device */
4205 static int et131x_close(struct net_device *netdev)
4206 {
4207         struct et131x_adapter *adapter = netdev_priv(netdev);
4208 
4209         et131x_down(netdev);
4210 
4211         adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
4212         free_irq(adapter->pdev->irq, netdev);
4213 
4214         /* Stop the error timer */
4215         return del_timer_sync(&adapter->error_timer);
4216 }
4217 
4218 /* et131x_ioctl - The I/O Control handler for the driver
4219  * @netdev: device on which the control request is being made
4220  * @reqbuf: a pointer to the IOCTL request buffer
4221  * @cmd: the IOCTL command code
4222  */
4223 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4224                         int cmd)
4225 {
4226         struct et131x_adapter *adapter = netdev_priv(netdev);
4227 
4228         if (!adapter->phydev)
4229                 return -EINVAL;
4230 
4231         return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4232 }
4233 
4234 /* et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4235  * @adapter: pointer to our private adapter structure
4236  *
4237  * FIXME: lot of dups with MAC code
4238  */
4239 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4240 {
4241         int filter = adapter->packet_filter;
4242         u32 ctrl;
4243         u32 pf_ctrl;
4244 
4245         ctrl = readl(&adapter->regs->rxmac.ctrl);
4246         pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4247 
4248         /* Default to disabled packet filtering.  Enable it in the individual
4249          * case statements that require the device to filter something
4250          */
4251         ctrl |= 0x04;
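             /* Bit 0x04 of rxmac.ctrl is presumably the 'packet filter
              * disable' bit; the cases below clear it again whenever a
              * specific filter is programmed.
              */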
4252 
4253         /* Set us to be in promiscuous mode so we receive everything, this
4254          * is also true when we get a packet filter of 0
4255          */
4256         if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4257                 pf_ctrl &= ~7;  /* Clear filter bits */
4258         else {
4259                 /* Set us up with Multicast packet filtering.  Three cases are
4260                  * possible - (1) we have a multi-cast list, (2) we receive ALL
4261                  * multicast entries or (3) we receive none.
4262                  */
4263                 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4264                         pf_ctrl &= ~2;  /* Multicast filter bit */
4265                 else {
4266                         et1310_setup_device_for_multicast(adapter);
4267                         pf_ctrl |= 2;
4268                         ctrl &= ~0x04;
4269                 }
4270 
4271                 /* Set us up with Unicast packet filtering */
4272                 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4273                         et1310_setup_device_for_unicast(adapter);
4274                         pf_ctrl |= 4;
4275                         ctrl &= ~0x04;
4276                 }
4277 
4278                 /* Set us up with Broadcast packet filtering */
4279                 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4280                         pf_ctrl |= 1;   /* Broadcast filter bit */
4281                         ctrl &= ~0x04;
4282                 } else
4283                         pf_ctrl &= ~1;
4284 
4285                 /* Setup the receive mac configuration registers - Packet
4286                  * Filter control + the enable / disable for packet filter
4287                  * in the control reg.
4288                  */
4289                 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4290                 writel(ctrl, &adapter->regs->rxmac.ctrl);
4291         }
4292         return 0;
4293 }
4294 
4295 /* et131x_multicast - The handler to configure multicasting on the interface */
4296 static void et131x_multicast(struct net_device *netdev)
4297 {
4298         struct et131x_adapter *adapter = netdev_priv(netdev);
4299         int packet_filter;
4300         struct netdev_hw_addr *ha;
4301         int i;
4302 
4303         /* Before we modify the platform-independent filter flags, store them
4304          * locally. This allows us to determine if anything's changed and if
4305          * we even need to bother the hardware
4306          */
4307         packet_filter = adapter->packet_filter;
4308 
4309         /* Clear the 'multicast' flag locally; because we only have a single
4310          * flag to check multicast, and multiple multicast addresses can be
4311          * set, this is the easiest way to determine if more than one
4312          * multicast address is being set.
4313          */
4314         packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4315 
4316         /* Check the net_device flags and set the device independent flags
4317          * accordingly
4318          */
4319 
4320         if (netdev->flags & IFF_PROMISC)
4321                 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4322         else
4323                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4324 
4325         if (netdev->flags & IFF_ALLMULTI)
4326                 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4327 
4328         if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4329                 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4330 
4331         if (netdev_mc_count(netdev) < 1) {
4332                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4333                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4334         } else
4335                 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4336 
4337         /* Set values in the private adapter struct */
4338         i = 0;
4339         netdev_for_each_mc_addr(ha, netdev) {
4340                 if (i == NIC_MAX_MCAST_LIST)
4341                         break;
4342                 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4343         }
4344         adapter->multicast_addr_count = i;
4345 
4346         /* Are the new flags different from the previous ones? If not, then no
4347          * action is required
4348          *
4349          * NOTE - This block will always update the multicast_list with the
4350          *        hardware, even if the addresses aren't the same.
4351          */
4352         if (packet_filter != adapter->packet_filter)
4353                 et131x_set_packet_filter(adapter);
4354 }
4355 
4356 /* et131x_tx - The handler to tx a packet on the device */
4357 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
4358 {
4359         int status = 0;
4360         struct et131x_adapter *adapter = netdev_priv(netdev);
4361         struct tx_ring *tx_ring = &adapter->tx_ring;
4362 
4363         /* stop the queue if it's getting full */
4364         if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
4365                 netif_stop_queue(netdev);
4366 
4367         /* Save the timestamp for the TX timeout watchdog */
4368         netdev->trans_start = jiffies;
4369 
4370         /* Call the device-specific data Tx routine */
4371         status = et131x_send_packets(skb, netdev);
4372 
4373         /* Check status and manage the netif queue if necessary */
4374         if (status != 0) {
4375                 if (status == -ENOMEM)
4376                         status = NETDEV_TX_BUSY;
4377                 else
4378                         status = NETDEV_TX_OK;
4379         }
4380         return status;
4381 }
4382 
4383 /* et131x_tx_timeout - Timeout handler
4384  *
4385  * The handler called when a Tx request times out. The timeout period is
4386  * specified by the 'watchdog_timeo' element in the net_device structure
4387  * (set to ET131X_TX_TIMEOUT in et131x_pci_setup()).
4388  */
4389 static void et131x_tx_timeout(struct net_device *netdev)
4390 {
4391         struct et131x_adapter *adapter = netdev_priv(netdev);
4392         struct tx_ring *tx_ring = &adapter->tx_ring;
4393         struct tcb *tcb;
4394         unsigned long flags;
4395 
4396         /* If the device is closed, ignore the timeout */
4397         if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
4398                 return;
4399 
4400         /* Any nonrecoverable hardware error?
4401          * Checks adapter->flags for any failure in phy reading
4402          */
4403         if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
4404                 return;
4405 
4406         /* Hardware failure? */
4407         if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
4408                 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4409                 return;
4410         }
4411 
4412         /* Is send stuck? */
4413         spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
4414 
4415         tcb = tx_ring->send_head;
4416 
4417         if (tcb != NULL) {
4418                 tcb->count++;
4419 
4420                 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
4421                         spin_unlock_irqrestore(&adapter->tcb_send_qlock,
4422                                                flags);
4423 
4424                         dev_warn(&adapter->pdev->dev,
4425                                 "Send stuck - reset.  tcb->index %x, flags 0x%08x\n",
4426                                 tcb->index,
4427                                 tcb->flags);
4428 
4429                         adapter->net_stats.tx_errors++;
4430 
4431                         /* perform reset of tx/rx */
4432                         et131x_disable_txrx(netdev);
4433                         et131x_enable_txrx(netdev);
4434                         return;
4435                 }
4436         }
4437 
4438         spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
4439 }
4440 
4441 /* et131x_change_mtu - The handler called to change the MTU for the device */
4442 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4443 {
4444         int result = 0;
4445         struct et131x_adapter *adapter = netdev_priv(netdev);
4446 
4447         /* Make sure the requested MTU is valid */
4448         if (new_mtu < 64 || new_mtu > 9216)
4449                 return -EINVAL;
4450 
4451         et131x_disable_txrx(netdev);
4452         et131x_handle_send_interrupt(adapter);
4453         et131x_handle_recv_interrupt(adapter);
4454 
4455         /* Set the new MTU */
4456         netdev->mtu = new_mtu;
4457 
4458         /* Free Rx DMA memory */
4459         et131x_adapter_memory_free(adapter);
4460 
4461         /* Set the config parameter for Jumbo Packet support */
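             /* The extra 14 bytes presumably allow for the Ethernet header
              * (ETH_HLEN) on top of the MTU-sized payload.
              */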
4462         adapter->registry_jumbo_packet = new_mtu + 14;
4463         et131x_soft_reset(adapter);
4464 
4465         /* Alloc and init Rx DMA memory */
4466         result = et131x_adapter_memory_alloc(adapter);
4467         if (result != 0) {
4468                 dev_warn(&adapter->pdev->dev,
4469                         "Change MTU failed; couldn't re-alloc DMA memory\n");
4470                 return result;
4471         }
4472 
4473         et131x_init_send(adapter);
4474 
4475         et131x_hwaddr_init(adapter);
4476         memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4477 
4478         /* Init the device with the new settings */
4479         et131x_adapter_setup(adapter);
4480 
4481         et131x_enable_txrx(netdev);
4482 
4483         return result;
4484 }
4485 
4486 /* et131x_set_mac_addr - handler to change the MAC address for the device */
4487 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
4488 {
4489         int result = 0;
4490         struct et131x_adapter *adapter = netdev_priv(netdev);
4491         struct sockaddr *address = new_mac;
4492 
4493         if (adapter == NULL)
4494                 return -ENODEV;
4495 
4496         /* Make sure the requested MAC is valid */
4497         if (!is_valid_ether_addr(address->sa_data))
4498                 return -EADDRNOTAVAIL;
4499 
4500         et131x_disable_txrx(netdev);
4501         et131x_handle_send_interrupt(adapter);
4502         et131x_handle_recv_interrupt(adapter);
4503 
4504         /* Set the new MAC */
4505         /* netdev->set_mac_address  = &new_mac; */
4506 
4507         memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
4508 
4509         netdev_info(netdev, "Setting MAC address to %pM\n",
4510                     netdev->dev_addr);
4511 
4512         /* Free Rx DMA memory */
4513         et131x_adapter_memory_free(adapter);
4514 
4515         et131x_soft_reset(adapter);
4516 
4517         /* Alloc and init Rx DMA memory */
4518         result = et131x_adapter_memory_alloc(adapter);
4519         if (result != 0) {
4520                 dev_err(&adapter->pdev->dev,
4521                         "Change MAC failed; couldn't re-alloc DMA memory\n");
4522                 return result;
4523         }
4524 
4525         et131x_init_send(adapter);
4526 
4527         et131x_hwaddr_init(adapter);
4528 
4529         /* Init the device with the new settings */
4530         et131x_adapter_setup(adapter);
4531 
4532         et131x_enable_txrx(netdev);
4533 
4534         return result;
4535 }
4536 
4537 static const struct net_device_ops et131x_netdev_ops = {
4538         .ndo_open               = et131x_open,
4539         .ndo_stop               = et131x_close,
4540         .ndo_start_xmit         = et131x_tx,
4541         .ndo_set_rx_mode        = et131x_multicast,
4542         .ndo_tx_timeout         = et131x_tx_timeout,
4543         .ndo_change_mtu         = et131x_change_mtu,
4544         .ndo_set_mac_address    = et131x_set_mac_addr,
4545         .ndo_validate_addr      = eth_validate_addr,
4546         .ndo_get_stats          = et131x_stats,
4547         .ndo_do_ioctl           = et131x_ioctl,
4548 };
4549 
4550 /* et131x_pci_setup - Perform device initialization
4551  * @pdev: a pointer to the device's pci_dev structure
4552  * @ent: this device's entry in the pci_device_id table
4553  *
4554  * Registered in the pci_driver structure, this function is called when the
4555  * PCI subsystem finds a new PCI device which matches the information
4556  * contained in the pci_device_id table. This routine is the equivalent to
4557  * a device insertion routine.
4558  */
4559 static int et131x_pci_setup(struct pci_dev *pdev,
4560                             const struct pci_device_id *ent)
4561 {
4562         struct net_device *netdev;
4563         struct et131x_adapter *adapter;
4564         int rc;
4565         int ii;
4566 
4567         rc = pci_enable_device(pdev);
4568         if (rc < 0) {
4569                 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4570                 goto out;
4571         }
4572 
4573         /* Perform some basic PCI checks */
4574         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4575                 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4576                 rc = -ENODEV;
4577                 goto err_disable;
4578         }
4579 
4580         rc = pci_request_regions(pdev, DRIVER_NAME);
4581         if (rc < 0) {
4582                 dev_err(&pdev->dev, "Can't get PCI resources\n");
4583                 goto err_disable;
4584         }
4585 
4586         pci_set_master(pdev);
4587 
4588         /* Check the DMA addressing support of this device */
4589         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
4590             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
4591                 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4592                 rc = -EIO;
4593                 goto err_release_res;
4594         }
4595 
4596         /* Allocate netdev and private adapter structs */
4597         netdev = alloc_etherdev(sizeof(struct et131x_adapter));
4598         if (!netdev) {
4599                 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4600                 rc = -ENOMEM;
4601                 goto err_release_res;
4602         }
4603 
4604         netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
4605         netdev->netdev_ops     = &et131x_netdev_ops;
4606 
4607         SET_NETDEV_DEV(netdev, &pdev->dev);
4608         netdev->ethtool_ops = &et131x_ethtool_ops;
4609 
4610         adapter = et131x_adapter_init(netdev, pdev);
4611 
4612         rc = et131x_pci_init(adapter, pdev);
4613         if (rc < 0)
4614                 goto err_free_dev;
4615 
4616         /* Map the bus-relative registers to system virtual memory */
4617         adapter->regs = pci_ioremap_bar(pdev, 0);
4618         if (!adapter->regs) {
4619                 dev_err(&pdev->dev, "Cannot map device registers\n");
4620                 rc = -ENOMEM;
4621                 goto err_free_dev;
4622         }
4623 
4624         /* If Phy COMA mode was enabled when we went down, disable it here. */
4625         writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);
4626 
4627         /* Issue a global reset to the et1310 */
4628         et131x_soft_reset(adapter);
4629 
4630         /* Disable all interrupts (paranoid) */
4631         et131x_disable_interrupts(adapter);
4632 
4633         /* Allocate DMA memory */
4634         rc = et131x_adapter_memory_alloc(adapter);
4635         if (rc < 0) {
4636                 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4637                 goto err_iounmap;
4638         }
4639 
4640         /* Init send data structures */
4641         et131x_init_send(adapter);
4642 
4643         /* Set up the task structure for the ISR's deferred handler */
4644         INIT_WORK(&adapter->task, et131x_isr_handler);
4645 
4646         /* Copy address into the net_device struct */
4647         memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4648 
4649         /* Init variable for counting how long we do not have link status */
4650         adapter->boot_coma = 0;
4651         et1310_disable_phy_coma(adapter);
4652 
4653         rc = -ENOMEM;
4654 
4655         /* Setup the mii_bus struct */
4656         adapter->mii_bus = mdiobus_alloc();
4657         if (!adapter->mii_bus) {
4658                 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4659                 goto err_mem_free;
4660         }
4661 
4662         adapter->mii_bus->name = "et131x_eth_mii";
4663         snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4664                 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4665         adapter->mii_bus->priv = netdev;
4666         adapter->mii_bus->read = et131x_mdio_read;
4667         adapter->mii_bus->write = et131x_mdio_write;
4668         adapter->mii_bus->reset = et131x_mdio_reset;
4669         adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
4670                                               GFP_KERNEL);
4671         if (!adapter->mii_bus->irq)
4672                 goto err_mdio_free;
4673 
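             /* PHY_POLL tells the PHY library that no interrupt line is
              * wired to the PHY, so link changes are detected by polling.
              */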
4674         for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4675                 adapter->mii_bus->irq[ii] = PHY_POLL;
4676 
4677         rc = mdiobus_register(adapter->mii_bus);
4678         if (rc < 0) {
4679                 dev_err(&pdev->dev, "failed to register MII bus\n");
4680                 goto err_mdio_free_irq;
4681         }
4682 
4683         rc = et131x_mii_probe(netdev);
4684         if (rc < 0) {
4685                 dev_err(&pdev->dev, "failed to probe MII bus\n");
4686                 goto err_mdio_unregister;
4687         }
4688 
4689         /* Setup et1310 as per the documentation */
4690         et131x_adapter_setup(adapter);
4691 
4692         /* We can enable interrupts now
4693          *
4694          *  NOTE - Because registration of interrupt handler is done in the
4695          *         device's open(), defer enabling device interrupts to that
4696          *         point
4697          */
4698 
4699         /* Register the net_device struct with the Linux network layer */
4700         rc = register_netdev(netdev);
4701         if (rc < 0) {
4702                 dev_err(&pdev->dev, "register_netdev() failed\n");
4703                 goto err_phy_disconnect;
4704         }
4705 
4706         /* Stash the net_device pointer in the PCI device's driver data
4707          * so that the remove and power-management handlers can retrieve
4708          * it later.
4709          */
4710         pci_set_drvdata(pdev, netdev);
4711 out:
4712         return rc;
4713 
4714 err_phy_disconnect:
4715         phy_disconnect(adapter->phydev);
4716 err_mdio_unregister:
4717         mdiobus_unregister(adapter->mii_bus);
4718 err_mdio_free_irq:
4719         kfree(adapter->mii_bus->irq);
4720 err_mdio_free:
4721         mdiobus_free(adapter->mii_bus);
4722 err_mem_free:
4723         et131x_adapter_memory_free(adapter);
4724 err_iounmap:
4725         iounmap(adapter->regs);
4726 err_free_dev:
4727         pci_dev_put(pdev);
4728         free_netdev(netdev);
4729 err_release_res:
4730         pci_release_regions(pdev);
4731 err_disable:
4732         pci_disable_device(pdev);
4733         goto out;
4734 }
4735 
4736 static const struct pci_device_id et131x_pci_table[] = {
4737         { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4738         { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4739         {0,}
4740 };
4741 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4742 
4743 static struct pci_driver et131x_driver = {
4744         .name           = DRIVER_NAME,
4745         .id_table       = et131x_pci_table,
4746         .probe          = et131x_pci_setup,
4747         .remove         = et131x_pci_remove,
4748         .driver.pm      = ET131X_PM_OPS,
4749 };
4750 
4751 module_pci_driver(et131x_driver);
4752 
