
Linux/drivers/net/ethernet/via/via-velocity.c

  1 /*
  2  * This code is derived from the VIA reference driver (copyright message
  3  * below) provided to Red Hat by VIA Networking Technologies, Inc. for
  4  * addition to the Linux kernel.
  5  *
  6  * The code has been merged into one source file, cleaned up to follow
  7  * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
  8  * for 64bit hardware platforms.
  9  *
 10  * TODO
 11  *      rx_copybreak/alignment
 12  *      More testing
 13  *
 14  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 15  * Additional fixes and clean up: Francois Romieu
 16  *
 17  * This source has not been verified for use in safety critical systems.
 18  *
 19  * Please direct queries about the revamped driver to the linux-kernel
 20  * list not VIA.
 21  *
 22  * Original code:
 23  *
 24  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 25  * All rights reserved.
 26  *
 27  * This software may be redistributed and/or modified under
 28  * the terms of the GNU General Public License as published by the Free
 29  * Software Foundation; either version 2 of the License, or
 30  * any later version.
 31  *
 32  * This program is distributed in the hope that it will be useful, but
 33  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 34  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 35  * for more details.
 36  *
 37  * Author: Chuang Liang-Shing, AJ Jiang
 38  *
 39  * Date: Jan 24, 2003
 40  *
 41  * MODULE_LICENSE("GPL");
 42  *
 43  */
 44 
 45 #include <linux/module.h>
 46 #include <linux/types.h>
 47 #include <linux/bitops.h>
 48 #include <linux/init.h>
 49 #include <linux/dma-mapping.h>
 50 #include <linux/mm.h>
 51 #include <linux/errno.h>
 52 #include <linux/ioport.h>
 53 #include <linux/pci.h>
 54 #include <linux/kernel.h>
 55 #include <linux/netdevice.h>
 56 #include <linux/etherdevice.h>
 57 #include <linux/skbuff.h>
 58 #include <linux/delay.h>
 59 #include <linux/timer.h>
 60 #include <linux/slab.h>
 61 #include <linux/interrupt.h>
 62 #include <linux/string.h>
 63 #include <linux/wait.h>
 64 #include <linux/io.h>
 65 #include <linux/if.h>
 66 #include <linux/uaccess.h>
 67 #include <linux/proc_fs.h>
 68 #include <linux/of_address.h>
 69 #include <linux/of_device.h>
 70 #include <linux/of_irq.h>
 71 #include <linux/inetdevice.h>
 72 #include <linux/platform_device.h>
 73 #include <linux/reboot.h>
 74 #include <linux/ethtool.h>
 75 #include <linux/mii.h>
 76 #include <linux/in.h>
 77 #include <linux/if_arp.h>
 78 #include <linux/if_vlan.h>
 79 #include <linux/ip.h>
 80 #include <linux/tcp.h>
 81 #include <linux/udp.h>
 82 #include <linux/crc-ccitt.h>
 83 #include <linux/crc32.h>
 84 
 85 #include "via-velocity.h"
 86 
 87 enum velocity_bus_type {
 88         BUS_PCI,
 89         BUS_PLATFORM,
 90 };
 91 
 92 static int velocity_nics;
 93 static int msglevel = MSG_LEVEL_INFO;
 94 
 95 static void velocity_set_power_state(struct velocity_info *vptr, char state)
 96 {
  97         void __iomem *addr = vptr->mac_regs;
 98 
 99         if (vptr->pdev)
100                 pci_set_power_state(vptr->pdev, state);
101         else
102                 writeb(state, addr + 0x154);
103 }
104 
105 /**
106  *      mac_get_cam_mask        -       Read a CAM mask
107  *      @regs: register block for this velocity
108  *      @mask: buffer to store mask
109  *
110  *      Fetch the mask bits of the selected CAM and store them into the
111  *      provided mask buffer.
112  */
113 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
114 {
115         int i;
116 
117         /* Select CAM mask */
118         BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
119 
120         writeb(0, &regs->CAMADDR);
121 
122         /* read mask */
123         for (i = 0; i < 8; i++)
124                 *mask++ = readb(&(regs->MARCAM[i]));
125 
126         /* disable CAMEN */
127         writeb(0, &regs->CAMADDR);
128 
129         /* Select mar */
130         BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
131 }
132 
133 /**
134  *      mac_set_cam_mask        -       Set a CAM mask
135  *      @regs: register block for this velocity
136  *      @mask: CAM mask to load
137  *
138  *      Store a new mask into a CAM
139  */
140 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
141 {
142         int i;
143         /* Select CAM mask */
144         BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
145 
146         writeb(CAMADDR_CAMEN, &regs->CAMADDR);
147 
148         for (i = 0; i < 8; i++)
149                 writeb(*mask++, &(regs->MARCAM[i]));
150 
151         /* disable CAMEN */
152         writeb(0, &regs->CAMADDR);
153 
154         /* Select mar */
155         BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
156 }
157 
158 static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
159 {
160         int i;
161         /* Select CAM mask */
162         BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
163 
164         writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
165 
166         for (i = 0; i < 8; i++)
167                 writeb(*mask++, &(regs->MARCAM[i]));
168 
169         /* disable CAMEN */
170         writeb(0, &regs->CAMADDR);
171 
172         /* Select mar */
173         BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
174 }
175 
176 /**
177  *      mac_set_cam     -       set CAM data
178  *      @regs: register block of this velocity
179  *      @idx: Cam index
180  *      @addr: 2 or 6 bytes of CAM data
181  *
182  *      Load an address or vlan tag into a CAM
183  */
184 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
185 {
186         int i;
187 
188         /* Select CAM mask */
189         BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
190 
191         idx &= (64 - 1);
192 
193         writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
194 
195         for (i = 0; i < 6; i++)
196                 writeb(*addr++, &(regs->MARCAM[i]));
197 
198         BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
199 
200         udelay(10);
201 
202         writeb(0, &regs->CAMADDR);
203 
204         /* Select mar */
205         BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
206 }
207 
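/*
 * Illustrative sketch (not part of the driver): programming a unicast
 * address into CAM slot 0 and enabling that slot via the mask, the
 * same pattern velocity_set_multi() uses below. The address and the
 * helper name are made up for the example.
 */
#if 0
static void example_load_cam(struct mac_regs __iomem *regs, u8 *cam_mask)
{
	static const u8 addr[ETH_ALEN] = { 0x00, 0x40, 0x63, 0x12, 0x34, 0x56 };

	mac_set_cam(regs, 0, addr);		/* program CAM slot 0 */
	cam_mask[0] |= 0x01;			/* enable slot 0 in the mask */
	mac_set_cam_mask(regs, cam_mask);	/* commit the new mask */
}
#endif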
208 static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
209                              const u8 *addr)
210 {
211 
212         /* Select CAM mask */
213         BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
214 
215         idx &= (64 - 1);
216 
217         writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
218         writew(*((u16 *) addr), &regs->MARCAM[0]);
219 
220         BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
221 
222         udelay(10);
223 
224         writeb(0, &regs->CAMADDR);
225 
226         /* Select mar */
227         BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
228 }
229 
230 
231 /**
232  *      mac_wol_reset   -       reset WOL after exiting low power
233  *      @regs: register block of this velocity
234  *
235  *      Called after we drop out of wake on lan mode in order to
236  *      reset the Wake on lan features. This function doesn't restore
 237  *      the rest of the logic after a sleep/wakeup cycle
238  */
239 static void mac_wol_reset(struct mac_regs __iomem *regs)
240 {
241 
242         /* Turn off SWPTAG right after leaving power mode */
243         BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
244         /* clear sticky bits */
245         BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
246 
247         BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
248         BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
249         /* disable force PME-enable */
250         writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
251         /* disable power-event config bit */
252         writew(0xFFFF, &regs->WOLCRClr);
253         /* clear power status */
254         writew(0xFFFF, &regs->WOLSRClr);
255 }
256 
257 static const struct ethtool_ops velocity_ethtool_ops;
258 
259 /*
260     Define module options
261 */
262 
263 MODULE_AUTHOR("VIA Networking Technologies, Inc.");
264 MODULE_LICENSE("GPL");
265 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
266 
267 #define VELOCITY_PARAM(N, D) \
268         static int N[MAX_UNITS] = OPTION_DEFAULT;\
269         module_param_array(N, int, NULL, 0); \
270         MODULE_PARM_DESC(N, D);
271 
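/*
 * Each VELOCITY_PARAM(N, D) invocation below expands to roughly:
 *
 *	static int N[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(N, int, NULL, 0);
 *	MODULE_PARM_DESC(N, D);
 *
 * i.e. one value per NIC, settable at module load time (perm 0 keeps
 * the parameter out of sysfs), e.g. "modprobe via_velocity RxDescriptors=64,128".
 */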
272 #define RX_DESC_MIN     64
273 #define RX_DESC_MAX     255
274 #define RX_DESC_DEF     64
275 VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
276 
277 #define TX_DESC_MIN     16
278 #define TX_DESC_MAX     256
279 #define TX_DESC_DEF     64
280 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
281 
282 #define RX_THRESH_MIN   0
283 #define RX_THRESH_MAX   3
284 #define RX_THRESH_DEF   0
 285 /* rx_thresh[] is used for controlling the receive fifo threshold.
 286    0: the rxfifo threshold is 128 bytes.
 287    1: the rxfifo threshold is 512 bytes.
 288    2: the rxfifo threshold is 1024 bytes.
 289    3: the rxfifo works in store & forward mode.
290 */
291 VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
292 
293 #define DMA_LENGTH_MIN  0
294 #define DMA_LENGTH_MAX  7
295 #define DMA_LENGTH_DEF  6
296 
297 /* DMA_length[] is used for controlling the DMA length
298    0: 8 DWORDs
299    1: 16 DWORDs
300    2: 32 DWORDs
301    3: 64 DWORDs
302    4: 128 DWORDs
303    5: 256 DWORDs
 304    6: SF(flush till empty)
 305    7: SF(flush till empty)
306 */
307 VELOCITY_PARAM(DMA_length, "DMA length");
308 
309 #define IP_ALIG_DEF     0
 310 /* IP_byte_align[] is used to control IP header DWORD byte alignment
 311    0: the IP header won't be DWORD byte aligned. (Default)
 312    1: the IP header will be DWORD byte aligned.
 313       In some environments the IP header must be DWORD byte aligned,
 314       or the packet will be dropped when we receive it (e.g. IPVS).
 315 */
316 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
317 
318 #define FLOW_CNTL_DEF   1
319 #define FLOW_CNTL_MIN   1
320 #define FLOW_CNTL_MAX   5
321 
 322 /* flow_control[] is used for setting the flow control ability of the NIC.
 323    1: hardware default - AUTO (default). Use the hardware default value in ANAR.
 324    2: enable TX flow control.
 325    3: enable RX flow control.
 326    4: enable RX/TX flow control.
 327    5: disable flow control.
328 */
329 VELOCITY_PARAM(flow_control, "Enable flow control ability");
330 
331 #define MED_LNK_DEF 0
332 #define MED_LNK_MIN 0
333 #define MED_LNK_MAX 5
 334 /* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
 335    0: autonegotiation for both speed and duplex mode
 336    1: 100Mbps half duplex mode
 337    2: 100Mbps full duplex mode
 338    3: 10Mbps half duplex mode
 339    4: 10Mbps full duplex mode
 340    5: 1000Mbps full duplex mode
 341 
 342    Note:
 343    if the EEPROM has been set to a forced mode, this option is ignored
 344    by the driver.
345 */
346 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
347 
348 #define VAL_PKT_LEN_DEF     0
 349 /* ValPktLen[] is used for controlling how frames with an invalid
 350    layer 2 length are handled.
 351    0: Receive frames with invalid layer 2 length (Default)
 352    1: Drop frames with invalid layer 2 length */
 353 VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");
354 
355 #define WOL_OPT_DEF     0
356 #define WOL_OPT_MIN     0
357 #define WOL_OPT_MAX     7
 358 /* wol_opts[] is used for controlling wake on lan behavior.
 359    0: Wake up if a magic packet is received. (Default)
 360    1: Wake up if the link status changes.
 361    2: Wake up if an arp packet is received.
 362    4: Wake up if any unicast packet is received.
 363    These values can be summed to enable more than one option.
 364 */
365 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
366 
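/*
 * The option bits may be combined: e.g. "modprobe via_velocity
 * wol_opts=6" (2 + 4) wakes the first NIC on either an arp packet
 * or any unicast packet.
 */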
367 static int rx_copybreak = 200;
368 module_param(rx_copybreak, int, 0644);
369 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
370 
371 /*
372  *      Internal board variants. At the moment we have only one
373  */
374 static struct velocity_info_tbl chip_info_table[] = {
375         {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
376         { }
377 };
378 
379 /*
380  *      Describe the PCI device identifiers that we support in this
381  *      device driver. Used for hotplug autoloading.
382  */
383 
384 static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = {
385         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
386         { }
387 };
388 
389 MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
390 
391 /**
392  *      Describe the OF device identifiers that we support in this
393  *      device driver. Used for devicetree nodes.
394  */
395 static struct of_device_id velocity_of_ids[] = {
396         { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
397         { /* Sentinel */ },
398 };
399 MODULE_DEVICE_TABLE(of, velocity_of_ids);
400 
401 /**
402  *      get_chip_name   -       identifier to name
 403  *      @chip_id: chip identifier
 404  *
 405  *      Given a chip identifier return a suitable description. Returns
 406  *      a pointer to a static string valid while the driver is loaded.
407  */
408 static const char *get_chip_name(enum chip_type chip_id)
409 {
410         int i;
411         for (i = 0; chip_info_table[i].name != NULL; i++)
412                 if (chip_info_table[i].chip_id == chip_id)
413                         break;
414         return chip_info_table[i].name;
415 }
416 
417 /**
418  *      velocity_set_int_opt    -       parser for integer options
419  *      @opt: pointer to option value
420  *      @val: value the user requested (or -1 for default)
421  *      @min: lowest value allowed
422  *      @max: highest value allowed
423  *      @def: default value
424  *      @name: property name
 425  *      @devname: device name
426  *
427  *      Set an integer property in the module options. This function does
428  *      all the verification and checking as well as reporting so that
429  *      we don't duplicate code for each option.
430  */
431 static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
432                                  char *name, const char *devname)
433 {
434         if (val == -1)
435                 *opt = def;
436         else if (val < min || val > max) {
437                 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
438                                         devname, name, min, max);
439                 *opt = def;
440         } else {
441                 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
442                                         devname, name, val);
443                 *opt = val;
444         }
445 }
446 
447 /**
448  *      velocity_set_bool_opt   -       parser for boolean options
449  *      @opt: pointer to option value
450  *      @val: value the user requested (or -1 for default)
451  *      @def: default value (yes/no)
452  *      @flag: numeric value to set for true.
453  *      @name: property name
 454  *      @devname: device name
455  *
456  *      Set a boolean property in the module options. This function does
457  *      all the verification and checking as well as reporting so that
458  *      we don't duplicate code for each option.
459  */
460 static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
461                                   char *name, const char *devname)
462 {
463         (*opt) &= (~flag);
464         if (val == -1)
465                 *opt |= (def ? flag : 0);
466         else if (val < 0 || val > 1) {
467                 printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
468                         devname, name);
469                 *opt |= (def ? flag : 0);
470         } else {
471                 printk(KERN_INFO "%s: set parameter %s to %s\n",
472                         devname, name, val ? "TRUE" : "FALSE");
473                 *opt |= (val ? flag : 0);
474         }
475 }
476 
477 /**
478  *      velocity_get_options    -       set options on device
479  *      @opts: option structure for the device
480  *      @index: index of option to use in module options array
481  *      @devname: device name
482  *
483  *      Turn the module and command options into a single structure
484  *      for the current device
485  */
486 static void velocity_get_options(struct velocity_opt *opts, int index,
487                                  const char *devname)
488 {
489 
490         velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
491         velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
492         velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
493         velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
494 
495         velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
496         velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
497         velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
498         velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
499         velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
500         opts->numrx = (opts->numrx & ~3);
501 }
502 
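/*
 * Note the "& ~3" above: numrx is rounded down to a multiple of 4,
 * so a request for 45 receive descriptors becomes 44. This matches
 * the hardware rule that receive descriptors are handed to the NIC
 * in groups of four (see velocity_give_many_rx_descs()).
 */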
503 /**
504  *      velocity_init_cam_filter        -       initialise CAM
505  *      @vptr: velocity to program
506  *
507  *      Initialize the content addressable memory used for filters. Load
508  *      appropriately according to the presence of VLAN
509  */
510 static void velocity_init_cam_filter(struct velocity_info *vptr)
511 {
512         struct mac_regs __iomem *regs = vptr->mac_regs;
513         unsigned int vid, i = 0;
514 
515         /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
516         WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
517         WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
518 
519         /* Disable all CAMs */
520         memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
521         memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
522         mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
523         mac_set_cam_mask(regs, vptr->mCAMmask);
524 
525         /* Enable VCAMs */
526         for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
527                 mac_set_vlan_cam(regs, i, (u8 *) &vid);
528                 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
529                 if (++i >= VCAM_SIZE)
530                         break;
531         }
532         mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
533 }
534 
535 static int velocity_vlan_rx_add_vid(struct net_device *dev,
536                                     __be16 proto, u16 vid)
537 {
538         struct velocity_info *vptr = netdev_priv(dev);
539 
540         spin_lock_irq(&vptr->lock);
541         set_bit(vid, vptr->active_vlans);
542         velocity_init_cam_filter(vptr);
543         spin_unlock_irq(&vptr->lock);
544         return 0;
545 }
546 
547 static int velocity_vlan_rx_kill_vid(struct net_device *dev,
548                                      __be16 proto, u16 vid)
549 {
550         struct velocity_info *vptr = netdev_priv(dev);
551 
552         spin_lock_irq(&vptr->lock);
553         clear_bit(vid, vptr->active_vlans);
554         velocity_init_cam_filter(vptr);
555         spin_unlock_irq(&vptr->lock);
556         return 0;
557 }
558 
559 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
560 {
561         vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
562 }
563 
564 /**
565  *      velocity_rx_reset       -       handle a receive reset
566  *      @vptr: velocity we are resetting
567  *
568  *      Reset the ownership and status for the receive ring side.
569  *      Hand all the receive queue to the NIC.
570  */
571 static void velocity_rx_reset(struct velocity_info *vptr)
572 {
573 
574         struct mac_regs __iomem *regs = vptr->mac_regs;
575         int i;
576 
577         velocity_init_rx_ring_indexes(vptr);
578 
579         /*
580          *      Init state, all RD entries belong to the NIC
581          */
582         for (i = 0; i < vptr->options.numrx; ++i)
583                 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
584 
585         writew(vptr->options.numrx, &regs->RBRDU);
586         writel(vptr->rx.pool_dma, &regs->RDBaseLo);
587         writew(0, &regs->RDIdx);
588         writew(vptr->options.numrx - 1, &regs->RDCSize);
589 }
590 
591 /**
592  *      velocity_get_opt_media_mode     -       get media selection
593  *      @vptr: velocity adapter
594  *
595  *      Get the media mode stored in EEPROM or module options and load
596  *      mii_status accordingly. The requested link state information
597  *      is also returned.
598  */
599 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
600 {
601         u32 status = 0;
602 
603         switch (vptr->options.spd_dpx) {
604         case SPD_DPX_AUTO:
605                 status = VELOCITY_AUTONEG_ENABLE;
606                 break;
607         case SPD_DPX_100_FULL:
608                 status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
609                 break;
610         case SPD_DPX_10_FULL:
611                 status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
612                 break;
613         case SPD_DPX_100_HALF:
614                 status = VELOCITY_SPEED_100;
615                 break;
616         case SPD_DPX_10_HALF:
617                 status = VELOCITY_SPEED_10;
618                 break;
619         case SPD_DPX_1000_FULL:
620                 status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
621                 break;
622         }
623         vptr->mii_status = status;
624         return status;
625 }
626 
627 /**
628  *      safe_disable_mii_autopoll       -       autopoll off
629  *      @regs: velocity registers
630  *
631  *      Turn off the autopoll and wait for it to disable on the chip
632  */
633 static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
634 {
635         u16 ww;
636 
637         /*  turn off MAUTO */
638         writeb(0, &regs->MIICR);
639         for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
640                 udelay(1);
641                 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
642                         break;
643         }
644 }
645 
646 /**
647  *      enable_mii_autopoll     -       turn on autopolling
648  *      @regs: velocity registers
649  *
650  *      Enable the MII link status autopoll feature on the Velocity
651  *      hardware. Wait for it to enable.
652  */
653 static void enable_mii_autopoll(struct mac_regs __iomem *regs)
654 {
655         int ii;
656 
657         writeb(0, &(regs->MIICR));
658         writeb(MIIADR_SWMPL, &regs->MIIADR);
659 
660         for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
661                 udelay(1);
662                 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
663                         break;
664         }
665 
666         writeb(MIICR_MAUTO, &regs->MIICR);
667 
668         for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
669                 udelay(1);
670                 if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
671                         break;
672         }
673 
674 }
675 
676 /**
677  *      velocity_mii_read       -       read MII data
678  *      @regs: velocity registers
679  *      @index: MII register index
680  *      @data: buffer for received data
681  *
682  *      Perform a single read of an MII 16bit register. Returns zero
683  *      on success or -ETIMEDOUT if the PHY did not respond.
684  */
685 static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
686 {
687         u16 ww;
688 
689         /*
690          *      Disable MIICR_MAUTO, so that mii addr can be set normally
691          */
692         safe_disable_mii_autopoll(regs);
693 
694         writeb(index, &regs->MIIADR);
695 
696         BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
697 
698         for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
699                 if (!(readb(&regs->MIICR) & MIICR_RCMD))
700                         break;
701         }
702 
703         *data = readw(&regs->MIIDATA);
704 
705         enable_mii_autopoll(regs);
706         if (ww == W_MAX_TIMEOUT)
707                 return -ETIMEDOUT;
708         return 0;
709 }
710 
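/*
 * Illustrative sketch (not part of the driver): reading the PHY
 * identifier registers with velocity_mii_read(). MII_PHYSID1/2 come
 * from <linux/mii.h>; the helper name is made up for the example.
 */
#if 0
static int example_read_phy_id(struct mac_regs __iomem *regs, u32 *phy_id)
{
	u16 id1, id2;

	if (velocity_mii_read(regs, MII_PHYSID1, &id1) < 0)
		return -ETIMEDOUT;
	if (velocity_mii_read(regs, MII_PHYSID2, &id2) < 0)
		return -ETIMEDOUT;
	*phy_id = ((u32)id1 << 16) | id2;
	return 0;
}
#endif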
711 /**
712  *      mii_check_media_mode    -       check media state
713  *      @regs: velocity registers
714  *
715  *      Check the current MII status and determine the link status
716  *      accordingly
717  */
718 static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
719 {
720         u32 status = 0;
721         u16 ANAR;
722 
723         if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
724                 status |= VELOCITY_LINK_FAIL;
725 
726         if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
727                 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
728         else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
729                 status |= (VELOCITY_SPEED_1000);
730         else {
731                 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
732                 if (ANAR & ADVERTISE_100FULL)
733                         status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
734                 else if (ANAR & ADVERTISE_100HALF)
735                         status |= VELOCITY_SPEED_100;
736                 else if (ANAR & ADVERTISE_10FULL)
737                         status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
738                 else
739                         status |= (VELOCITY_SPEED_10);
740         }
741 
742         if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
743                 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
744                 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
745                     == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
746                         if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
747                                 status |= VELOCITY_AUTONEG_ENABLE;
748                 }
749         }
750 
751         return status;
752 }
753 
754 /**
755  *      velocity_mii_write      -       write MII data
756  *      @regs: velocity registers
757  *      @index: MII register index
758  *      @data: 16bit data for the MII register
759  *
760  *      Perform a single write to an MII 16bit register. Returns zero
761  *      on success or -ETIMEDOUT if the PHY did not respond.
762  */
763 static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
764 {
765         u16 ww;
766 
767         /*
768          *      Disable MIICR_MAUTO, so that mii addr can be set normally
769          */
770         safe_disable_mii_autopoll(regs);
771 
772         /* MII reg offset */
773         writeb(mii_addr, &regs->MIIADR);
774         /* set MII data */
775         writew(data, &regs->MIIDATA);
776 
777         /* turn on MIICR_WCMD */
778         BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
779 
780         /* W_MAX_TIMEOUT is the timeout period */
781         for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
782                 udelay(5);
783                 if (!(readb(&regs->MIICR) & MIICR_WCMD))
784                         break;
785         }
786         enable_mii_autopoll(regs);
787 
788         if (ww == W_MAX_TIMEOUT)
789                 return -ETIMEDOUT;
790         return 0;
791 }
792 
793 /**
794  *      set_mii_flow_control    -       flow control setup
795  *      @vptr: velocity interface
796  *
797  *      Set up the flow control on this interface according to
798  *      the supplied user/eeprom options.
799  */
800 static void set_mii_flow_control(struct velocity_info *vptr)
801 {
 802         /* Enable or disable PAUSE in ANAR */
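         /*
          * Per IEEE 802.3 annex 28B the two ANAR bits encode:
          * TX only = ASM_DIR, RX (or RX with TX) = PAUSE + ASM_DIR,
          * symmetric TX/RX = PAUSE only; the cases below follow
          * that encoding.
          */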
803         switch (vptr->options.flow_cntl) {
804         case FLOW_CNTL_TX:
805                 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
806                 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
807                 break;
808 
809         case FLOW_CNTL_RX:
810                 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
811                 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
812                 break;
813 
814         case FLOW_CNTL_TX_RX:
815                 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
816                 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
817                 break;
818 
819         case FLOW_CNTL_DISABLE:
820                 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
821                 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
822                 break;
823         default:
824                 break;
825         }
826 }
827 
828 /**
829  *      mii_set_auto_on         -       autonegotiate on
830  *      @vptr: velocity
831  *
 832  *      Enable autonegotiation on this interface
833  */
834 static void mii_set_auto_on(struct velocity_info *vptr)
835 {
836         if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
837                 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
838         else
839                 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
840 }
841 
842 static u32 check_connection_type(struct mac_regs __iomem *regs)
843 {
844         u32 status = 0;
845         u8 PHYSR0;
846         u16 ANAR;
847         PHYSR0 = readb(&regs->PHYSR0);
848 
849         /*
850            if (!(PHYSR0 & PHYSR0_LINKGD))
851            status|=VELOCITY_LINK_FAIL;
852          */
853 
854         if (PHYSR0 & PHYSR0_FDPX)
855                 status |= VELOCITY_DUPLEX_FULL;
856 
857         if (PHYSR0 & PHYSR0_SPDG)
858                 status |= VELOCITY_SPEED_1000;
859         else if (PHYSR0 & PHYSR0_SPD10)
860                 status |= VELOCITY_SPEED_10;
861         else
862                 status |= VELOCITY_SPEED_100;
863 
864         if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
865                 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
866                 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
867                     == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
868                         if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
869                                 status |= VELOCITY_AUTONEG_ENABLE;
870                 }
871         }
872 
873         return status;
874 }
875 
876 /**
877  *      velocity_set_media_mode         -       set media mode
 878  *      @vptr: velocity adapter
 879  *      @mii_status: old MII link state
 880  *
 881  *      Check the media link state and configure the PHY flow control
 882  *      and velocity hardware setup accordingly, including CD polling and frame bursting.
883  */
884 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
885 {
886         u32 curr_status;
887         struct mac_regs __iomem *regs = vptr->mac_regs;
888 
889         vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
890         curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
891 
892         /* Set mii link status */
893         set_mii_flow_control(vptr);
894 
895         /*
896            Check if new status is consistent with current status
897            if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
898                (mii_status==curr_status)) {
899            vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
900            vptr->mii_status=check_connection_type(vptr->mac_regs);
901            VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
902            return 0;
903            }
904          */
905 
906         if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
907                 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
908 
909         /*
910          *      If connection type is AUTO
911          */
912         if (mii_status & VELOCITY_AUTONEG_ENABLE) {
913                 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
914                 /* clear force MAC mode bit */
915                 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
916                 /* set duplex mode of MAC according to duplex mode of MII */
917                 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
918                 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
919                 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
920 
921                 /* enable AUTO-NEGO mode */
922                 mii_set_auto_on(vptr);
923         } else {
924                 u16 CTRL1000;
925                 u16 ANAR;
926                 u8 CHIPGCR;
927 
928                 /*
929                  * 1. if it's 3119, disable frame bursting in halfduplex mode
930                  *    and enable it in fullduplex mode
931                  * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
932                  * 3. only enable CD heart beat counter in 10HD mode
933                  */
934 
935                 /* set force MAC mode bit */
936                 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
937 
938                 CHIPGCR = readb(&regs->CHIPGCR);
939 
940                 if (mii_status & VELOCITY_SPEED_1000)
941                         CHIPGCR |= CHIPGCR_FCGMII;
942                 else
943                         CHIPGCR &= ~CHIPGCR_FCGMII;
944 
945                 if (mii_status & VELOCITY_DUPLEX_FULL) {
946                         CHIPGCR |= CHIPGCR_FCFDX;
947                         writeb(CHIPGCR, &regs->CHIPGCR);
948                         VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
949                         if (vptr->rev_id < REV_ID_VT3216_A0)
950                                 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
951                 } else {
952                         CHIPGCR &= ~CHIPGCR_FCFDX;
953                         VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
954                         writeb(CHIPGCR, &regs->CHIPGCR);
955                         if (vptr->rev_id < REV_ID_VT3216_A0)
956                                 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
957                 }
958 
959                 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
960                 CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
961                 if ((mii_status & VELOCITY_SPEED_1000) &&
962                     (mii_status & VELOCITY_DUPLEX_FULL)) {
963                         CTRL1000 |= ADVERTISE_1000FULL;
964                 }
965                 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
966 
967                 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
968                         BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
969                 else
970                         BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
971 
972                 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
973                 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
974                 ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
975                 if (mii_status & VELOCITY_SPEED_100) {
976                         if (mii_status & VELOCITY_DUPLEX_FULL)
977                                 ANAR |= ADVERTISE_100FULL;
978                         else
979                                 ANAR |= ADVERTISE_100HALF;
980                 } else if (mii_status & VELOCITY_SPEED_10) {
981                         if (mii_status & VELOCITY_DUPLEX_FULL)
982                                 ANAR |= ADVERTISE_10FULL;
983                         else
984                                 ANAR |= ADVERTISE_10HALF;
985                 }
986                 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
987                 /* enable AUTO-NEGO mode */
988                 mii_set_auto_on(vptr);
989                 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
990         }
991         /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
992         /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
993         return VELOCITY_LINK_CHANGE;
994 }
995 
996 /**
997  *      velocity_print_link_status      -       link status reporting
998  *      @vptr: velocity to report on
999  *
1000  *      Turn the link status of the velocity card into a kernel log
1001  *      description of the new link state, detailing speed and duplex
1002  *      status
1003  */
1004 static void velocity_print_link_status(struct velocity_info *vptr)
1005 {
1006 
1007         if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1008                 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
1009         } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1010                 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);
1011 
1012                 if (vptr->mii_status & VELOCITY_SPEED_1000)
1013                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1014                 else if (vptr->mii_status & VELOCITY_SPEED_100)
1015                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1016                 else
1017                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1018 
1019                 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1020                         VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1021                 else
1022                         VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1023         } else {
1024                 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
1025                 switch (vptr->options.spd_dpx) {
1026                 case SPD_DPX_1000_FULL:
1027                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
1028                         break;
1029                 case SPD_DPX_100_HALF:
1030                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1031                         break;
1032                 case SPD_DPX_100_FULL:
1033                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1034                         break;
1035                 case SPD_DPX_10_HALF:
1036                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1037                         break;
1038                 case SPD_DPX_10_FULL:
1039                         VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1040                         break;
1041                 default:
1042                         break;
1043                 }
1044         }
1045 }
1046 
1047 /**
1048  *      enable_flow_control_ability     -       flow control
1049  *      @vptr: velocity to configure
1050  *
1051  *      Set up flow control according to the flow control options
1052  *      determined by the eeprom/configuration.
1053  */
1054 static void enable_flow_control_ability(struct velocity_info *vptr)
1055 {
1056 
1057         struct mac_regs __iomem *regs = vptr->mac_regs;
1058 
1059         switch (vptr->options.flow_cntl) {
1060 
1061         case FLOW_CNTL_DEFAULT:
1062                 if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1063                         writel(CR0_FDXRFCEN, &regs->CR0Set);
1064                 else
1065                         writel(CR0_FDXRFCEN, &regs->CR0Clr);
1066 
1067                 if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1068                         writel(CR0_FDXTFCEN, &regs->CR0Set);
1069                 else
1070                         writel(CR0_FDXTFCEN, &regs->CR0Clr);
1071                 break;
1072 
1073         case FLOW_CNTL_TX:
1074                 writel(CR0_FDXTFCEN, &regs->CR0Set);
1075                 writel(CR0_FDXRFCEN, &regs->CR0Clr);
1076                 break;
1077 
1078         case FLOW_CNTL_RX:
1079                 writel(CR0_FDXRFCEN, &regs->CR0Set);
1080                 writel(CR0_FDXTFCEN, &regs->CR0Clr);
1081                 break;
1082 
1083         case FLOW_CNTL_TX_RX:
1084                 writel(CR0_FDXTFCEN, &regs->CR0Set);
1085                 writel(CR0_FDXRFCEN, &regs->CR0Set);
1086                 break;
1087 
1088         case FLOW_CNTL_DISABLE:
1089                 writel(CR0_FDXRFCEN, &regs->CR0Clr);
1090                 writel(CR0_FDXTFCEN, &regs->CR0Clr);
1091                 break;
1092 
1093         default:
1094                 break;
1095         }
1096 
1097 }
1098 
1099 /**
1100  *      velocity_soft_reset     -       soft reset
1101  *      @vptr: velocity to reset
1102  *
1103  *      Kick off a soft reset of the velocity adapter and then poll
1104  *      until the reset sequence has completed before returning.
1105  */
1106 static int velocity_soft_reset(struct velocity_info *vptr)
1107 {
1108         struct mac_regs __iomem *regs = vptr->mac_regs;
1109         int i = 0;
1110 
1111         writel(CR0_SFRST, &regs->CR0Set);
1112 
1113         for (i = 0; i < W_MAX_TIMEOUT; i++) {
1114                 udelay(5);
1115                 if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1116                         break;
1117         }
1118 
1119         if (i == W_MAX_TIMEOUT) {
1120                 writel(CR0_FORSRST, &regs->CR0Set);
1121                 /* FIXME: PCI POSTING */
1122                 /* delay 2ms */
1123                 mdelay(2);
1124         }
1125         return 0;
1126 }
1127 
1128 /**
1129  *      velocity_set_multi      -       filter list change callback
1130  *      @dev: network device
1131  *
1132  *      Called by the network layer when the filter lists need to change
1133  *      for a velocity adapter. Reload the CAMs with the new address
1134  *      filter ruleset.
1135  */
1136 static void velocity_set_multi(struct net_device *dev)
1137 {
1138         struct velocity_info *vptr = netdev_priv(dev);
1139         struct mac_regs __iomem *regs = vptr->mac_regs;
1140         u8 rx_mode;
1141         int i;
1142         struct netdev_hw_addr *ha;
1143 
1144         if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1145                 writel(0xffffffff, &regs->MARCAM[0]);
1146                 writel(0xffffffff, &regs->MARCAM[4]);
1147                 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1148         } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1149                    (dev->flags & IFF_ALLMULTI)) {
1150                 writel(0xffffffff, &regs->MARCAM[0]);
1151                 writel(0xffffffff, &regs->MARCAM[4]);
1152                 rx_mode = (RCR_AM | RCR_AB);
1153         } else {
1154                 int offset = MCAM_SIZE - vptr->multicast_limit;
1155                 mac_get_cam_mask(regs, vptr->mCAMmask);
1156 
1157                 i = 0;
1158                 netdev_for_each_mc_addr(ha, dev) {
1159                         mac_set_cam(regs, i + offset, ha->addr);
1160                         vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1161                         i++;
1162                 }
1163 
1164                 mac_set_cam_mask(regs, vptr->mCAMmask);
1165                 rx_mode = RCR_AM | RCR_AB | RCR_AP;
1166         }
1167         if (dev->mtu > 1500)
1168                 rx_mode |= RCR_AL;
1169 
1170         BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1171 
1172 }
1173 
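/*
 * Worked example for the CAM indexing above: if multicast_limit were
 * 32 with MCAM_SIZE at 64 (see via-velocity.h), offset would be 32,
 * so the first multicast address lands in CAM slot 32 and sets bit 0
 * of mCAMmask[4] (32 / 8 = 4, 32 & 7 = 0).
 */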
1174 /*
1175  * MII access, media link mode setting functions
1176  */
1177 
1178 /**
1179  *      mii_init        -       set up MII
1180  *      @vptr: velocity adapter
1181  *      @mii_status: link status
1182  *
1183  *      Set up the PHY for the current link state.
1184  */
1185 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1186 {
1187         u16 BMCR;
1188 
1189         switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1190         case PHYID_ICPLUS_IP101A:
1191                 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1192                                                 MII_ADVERTISE, vptr->mac_regs);
1193                 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1194                         MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1195                                                                 vptr->mac_regs);
1196                 else
1197                         MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1198                                                                 vptr->mac_regs);
1199                 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200                 break;
1201         case PHYID_CICADA_CS8201:
1202                 /*
1203                  *      Reset to hardware default
1204                  */
1205                 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1206                 /*
1207                  *      Turn on the ECHODIS bit in NWay-forced full mode and turn
1208                  *      it off in NWay-forced half mode to work around the
1209                  *      NWay-forced vs. legacy-forced issue.
1210                  */
1211                 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1212                         MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1213                 else
1214                         MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1215                 /*
1216                  *      Turn on Link/Activity LED enable bit for CIS8201
1217                  */
1218                 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1219                 break;
1220         case PHYID_VT3216_32BIT:
1221         case PHYID_VT3216_64BIT:
1222                 /*
1223                  *      Reset to hardware default
1224                  */
1225                 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1226                 /*
1227                  *      Turn on the ECHODIS bit in NWay-forced full mode and turn
1228                  *      it off in NWay-forced half mode to work around the
1229                  *      NWay-forced vs. legacy-forced issue
1230                  */
1231                 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1232                         MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1233                 else
1234                         MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1235                 break;
1236 
1237         case PHYID_MARVELL_1000:
1238         case PHYID_MARVELL_1000S:
1239                 /*
1240                  *      Assert CRS on Transmit
1241                  */
1242                 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1243                 /*
1244                  *      Reset to hardware default
1245                  */
1246                 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1247                 break;
1248         default:
1249                 ;
1250         }
1251         velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1252         if (BMCR & BMCR_ISOLATE) {
1253                 BMCR &= ~BMCR_ISOLATE;
1254                 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1255         }
1256 }
1257 
1258 /**
1259  * setup_queue_timers   -       Setup interrupt timers
1260  * @vptr: velocity adapter
1261  *
1262  * Setup the interrupt suppression timeout (an interrupt fires if the frame count isn't filled in time).
1263  */
1264 static void setup_queue_timers(struct velocity_info *vptr)
1265 {
1266         /* Only for newer revisions */
1267         if (vptr->rev_id >= REV_ID_VT3216_A0) {
1268                 u8 txqueue_timer = 0;
1269                 u8 rxqueue_timer = 0;
1270 
1271                 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1272                                 VELOCITY_SPEED_100)) {
1273                         txqueue_timer = vptr->options.txqueue_timer;
1274                         rxqueue_timer = vptr->options.rxqueue_timer;
1275                 }
1276 
1277                 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1278                 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1279         }
1280 }
1281 
1282 /**
1283  * setup_adaptive_interrupts  -  Setup interrupt suppression
1284  *
1285  * @vptr: velocity adapter
1286  *
1287  * The velocity is able to suppress interrupts under high interrupt load.
1288  * This function turns on that feature.
1289  */
1290 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1291 {
1292         struct mac_regs __iomem *regs = vptr->mac_regs;
1293         u16 tx_intsup = vptr->options.tx_intsup;
1294         u16 rx_intsup = vptr->options.rx_intsup;
1295 
1296         /* Setup default interrupt mask (will be changed below) */
1297         vptr->int_mask = INT_MASK_DEF;
1298 
1299         /* Set Tx Interrupt Suppression Threshold */
1300         writeb(CAMCR_PS0, &regs->CAMCR);
1301         if (tx_intsup != 0) {
1302                 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1303                                 ISR_PTX2I | ISR_PTX3I);
1304                 writew(tx_intsup, &regs->ISRCTL);
1305         } else
1306                 writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1307 
1308         /* Set Rx Interrupt Suppression Threshold */
1309         writeb(CAMCR_PS1, &regs->CAMCR);
1310         if (rx_intsup != 0) {
1311                 vptr->int_mask &= ~ISR_PRXI;
1312                 writew(rx_intsup, &regs->ISRCTL);
1313         } else
1314                 writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1315 
1316         /* Select page to interrupt hold timer */
1317         writeb(0, &regs->CAMCR);
1318 }
1319 
1320 /**
1321  *      velocity_init_registers -       initialise MAC registers
1322  *      @vptr: velocity to init
1323  *      @type: type of initialisation (hot or cold)
1324  *
1325  *      Initialise the MAC on a reset or on first set up on the
1326  *      hardware.
1327  */
1328 static void velocity_init_registers(struct velocity_info *vptr,
1329                                     enum velocity_init_type type)
1330 {
1331         struct mac_regs __iomem *regs = vptr->mac_regs;
1332         struct net_device *netdev = vptr->netdev;
1333         int i, mii_status;
1334 
1335         mac_wol_reset(regs);
1336 
1337         switch (type) {
1338         case VELOCITY_INIT_RESET:
1339         case VELOCITY_INIT_WOL:
1340 
1341                 netif_stop_queue(netdev);
1342 
1343                 /*
1344                  *      Reset RX so the RX descriptor pointer lands on a 4X (multiple of 4) location
1345                  */
1346                 velocity_rx_reset(vptr);
1347                 mac_rx_queue_run(regs);
1348                 mac_rx_queue_wake(regs);
1349 
1350                 mii_status = velocity_get_opt_media_mode(vptr);
1351                 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1352                         velocity_print_link_status(vptr);
1353                         if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1354                                 netif_wake_queue(netdev);
1355                 }
1356 
1357                 enable_flow_control_ability(vptr);
1358 
1359                 mac_clear_isr(regs);
1360                 writel(CR0_STOP, &regs->CR0Clr);
1361                 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1362                                                         &regs->CR0Set);
1363 
1364                 break;
1365 
1366         case VELOCITY_INIT_COLD:
1367         default:
1368                 /*
1369                  *      Do reset
1370                  */
1371                 velocity_soft_reset(vptr);
1372                 mdelay(5);
1373 
1374                 if (!vptr->no_eeprom) {
1375                         mac_eeprom_reload(regs);
1376                         for (i = 0; i < 6; i++)
1377                                 writeb(netdev->dev_addr[i], regs->PAR + i);
1378                 }
1379 
1380                 /*
1381                  *      clear Pre_ACPI bit.
1382                  */
1383                 BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1384                 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1385                 mac_set_dma_length(regs, vptr->options.DMA_length);
1386 
1387                 writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1388                 /*
1389                  *      Back off algorithm use original IEEE standard
1390                  */
1391                 BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1392 
1393                 /*
1394                  *      Init CAM filter
1395                  */
1396                 velocity_init_cam_filter(vptr);
1397 
1398                 /*
1399                  *      Set packet filter: Receive directed and broadcast address
1400                  */
1401                 velocity_set_multi(netdev);
1402 
1403                 /*
1404                  *      Enable MII auto-polling
1405                  */
1406                 enable_mii_autopoll(regs);
1407 
1408                 setup_adaptive_interrupts(vptr);
1409 
1410                 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1411                 writew(vptr->options.numrx - 1, &regs->RDCSize);
1412                 mac_rx_queue_run(regs);
1413                 mac_rx_queue_wake(regs);
1414 
1415                 writew(vptr->options.numtx - 1, &regs->TDCSize);
1416 
1417                 for (i = 0; i < vptr->tx.numq; i++) {
1418                         writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1419                         mac_tx_queue_run(regs, i);
1420                 }
1421 
1422                 init_flow_control_register(vptr);
1423 
1424                 writel(CR0_STOP, &regs->CR0Clr);
1425                 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1426 
1427                 mii_status = velocity_get_opt_media_mode(vptr);
1428                 netif_stop_queue(netdev);
1429 
1430                 mii_init(vptr, mii_status);
1431 
1432                 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1433                         velocity_print_link_status(vptr);
1434                         if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1435                                 netif_wake_queue(netdev);
1436                 }
1437 
1438                 enable_flow_control_ability(vptr);
1439                 mac_hw_mibs_init(regs);
1440                 mac_write_int_mask(vptr->int_mask, regs);
1441                 mac_clear_isr(regs);
1442 
1443         }
1444 }
1445 
1446 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1447 {
1448         struct mac_regs __iomem *regs = vptr->mac_regs;
1449         int avail, dirty, unusable;
1450 
1451         /*
1452          * The number of RDs handed back must be a multiple of 4 (4X)
1453          * per the hardware spec (programming guide rev 1.20, p.13)
1454          */
1455         if (vptr->rx.filled < 4)
1456                 return;
1457 
1458         wmb();
1459 
1460         unusable = vptr->rx.filled & 0x0003;
1461         dirty = vptr->rx.dirty - unusable;
1462         for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1463                 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1464                 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1465         }
1466 
1467         writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1468         vptr->rx.filled = unusable;
1469 }
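
/*
 * Worked example (editor's illustration, not driver code): with
 * vptr->rx.filled == 10 the arithmetic above yields
 *
 *	unusable    = 10 & 0x0003 = 2
 *	handed back = 10 & 0xfffc = 8	(8 descriptors re-owned by the
 *					 NIC, 8 written to RBRDU)
 *	rx.filled   = 2			(carried over to the next refill)
 *
 * so the count reported to the hardware is always the required
 * multiple of four.
 */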
1470 
1471 /**
1472  *      velocity_init_dma_rings -       set up DMA rings
1473  *      @vptr: Velocity to set up
1474  *
1475  *      Allocate PCI mapped DMA rings for the receive and transmit layer
1476  *      to use.
1477  */
1478 static int velocity_init_dma_rings(struct velocity_info *vptr)
1479 {
1480         struct velocity_opt *opt = &vptr->options;
1481         const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1482         const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1483         dma_addr_t pool_dma;
1484         void *pool;
1485         unsigned int i;
1486 
1487         /*
1488          * Allocate all RD/TD rings in a single pool.
1489          *
1490          * dma_alloc_coherent() fulfills the requirement for 64-byte
1491          * alignment.
1492          */
1493         pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1494                                     rx_ring_size, &pool_dma, GFP_ATOMIC);
1495         if (!pool) {
1496                 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1497                         vptr->netdev->name);
1498                 return -ENOMEM;
1499         }
1500 
1501         vptr->rx.ring = pool;
1502         vptr->rx.pool_dma = pool_dma;
1503 
1504         pool += rx_ring_size;
1505         pool_dma += rx_ring_size;
1506 
1507         for (i = 0; i < vptr->tx.numq; i++) {
1508                 vptr->tx.rings[i] = pool;
1509                 vptr->tx.pool_dma[i] = pool_dma;
1510                 pool += tx_ring_size;
1511                 pool_dma += tx_ring_size;
1512         }
1513 
1514         return 0;
1515 }
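
/*
 * Resulting pool layout (editor's sketch): the single coherent
 * allocation above is carved up as
 *
 *	pool_dma -> +----------------------------+
 *	            | numrx * rx_desc   (RX ring)|
 *	            +----------------------------+
 *	            | numtx * tx_desc   (TX q 0) |
 *	            +----------------------------+
 *	            |            ...             |
 *	            +----------------------------+
 *	            | numtx * tx_desc   (TX q N) |
 *	            +----------------------------+
 */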
1516 
1517 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1518 {
1519         vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1520 }
1521 
1522 /**
1523  *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1524  *      @vptr: velocity
1525  *      @idx: ring index
1526  *
1527  *      Allocate a new full sized buffer for the reception of a frame and
1528  *      map it into PCI space for the hardware to use. The hardware
1529  *      requires *64* byte alignment of the buffer which makes life
1530  *      less fun than would be ideal.
1531  */
1532 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1533 {
1534         struct rx_desc *rd = &(vptr->rx.ring[idx]);
1535         struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1536 
1537         rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1538         if (rd_info->skb == NULL)
1539                 return -ENOMEM;
1540 
1541         /*
1542          *      Do the gymnastics to get the buffer head for data at
1543          *      64byte alignment.
1544          */
1545         skb_reserve(rd_info->skb,
1546                         64 - ((unsigned long) rd_info->skb->data & 63));
1547         rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1548                                         vptr->rx.buf_sz, DMA_FROM_DEVICE);
1549 
1550         /*
1551          *      Fill in the descriptor to match
1552          */
1553 
1554         *((u32 *)&rd->rdesc0) = 0;
1555         rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1556         rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1557         rd->pa_high = 0;
1558         return 0;
1559 }
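
/*
 * Alignment example (editor's illustration): if netdev_alloc_skb()
 * returns data ending in 0x1234, then 0x1234 & 63 == 0x34 and
 * skb_reserve(skb, 64 - 0x34) advances data to ...0x1240, a 64-byte
 * boundary. When data is already aligned, a full 64 bytes are
 * reserved, which the "+ 64" over-allocation above accounts for.
 */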
1560 
1561 
1562 static int velocity_rx_refill(struct velocity_info *vptr)
1563 {
1564         int dirty = vptr->rx.dirty, done = 0;
1565 
1566         do {
1567                 struct rx_desc *rd = vptr->rx.ring + dirty;
1568 
1569                 /* Fine for an all zero Rx desc at init time as well */
1570                 if (rd->rdesc0.len & OWNED_BY_NIC)
1571                         break;
1572 
1573                 if (!vptr->rx.info[dirty].skb) {
1574                         if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1575                                 break;
1576                 }
1577                 done++;
1578                 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1579         } while (dirty != vptr->rx.curr);
1580 
1581         if (done) {
1582                 vptr->rx.dirty = dirty;
1583                 vptr->rx.filled += done;
1584         }
1585 
1586         return done;
1587 }
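
/*
 * Editor's note: the refill walk runs from rx.dirty towards rx.curr,
 * i.e. over slots the NIC has already handed back. With numrx == 8,
 * dirty == 6 and curr == 2 it visits slots 6, 7, 0 and 1, stopping at
 * 2, or earlier if an allocation fails or a descriptor is still owned
 * by the NIC.
 */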
1588 
1589 /**
1590  *      velocity_free_rd_ring   -       free receive ring
1591  *      @vptr: velocity to clean up
1592  *
1593  *      Free the receive buffers for each ring slot and any
1594  *      attached socket buffers that need to go away.
1595  */
1596 static void velocity_free_rd_ring(struct velocity_info *vptr)
1597 {
1598         int i;
1599 
1600         if (vptr->rx.info == NULL)
1601                 return;
1602 
1603         for (i = 0; i < vptr->options.numrx; i++) {
1604                 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1605                 struct rx_desc *rd = vptr->rx.ring + i;
1606 
1607                 memset(rd, 0, sizeof(*rd));
1608 
1609                 if (!rd_info->skb)
1610                         continue;
1611                 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1612                                  DMA_FROM_DEVICE);
1613                 rd_info->skb_dma = 0;
1614 
1615                 dev_kfree_skb(rd_info->skb);
1616                 rd_info->skb = NULL;
1617         }
1618 
1619         kfree(vptr->rx.info);
1620         vptr->rx.info = NULL;
1621 }
1622 
1623 /**
1624  *      velocity_init_rd_ring   -       set up receive ring
1625  *      @vptr: velocity to configure
1626  *
1627  *      Allocate and set up the receive buffers for each ring slot and
1628  *      assign them to the network adapter.
1629  */
1630 static int velocity_init_rd_ring(struct velocity_info *vptr)
1631 {
1632         int ret = -ENOMEM;
1633 
1634         vptr->rx.info = kcalloc(vptr->options.numrx,
1635                                 sizeof(struct velocity_rd_info), GFP_KERNEL);
1636         if (!vptr->rx.info)
1637                 goto out;
1638 
1639         velocity_init_rx_ring_indexes(vptr);
1640 
1641         if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1642                 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1643                         "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
1644                 velocity_free_rd_ring(vptr);
1645                 goto out;
1646         }
1647 
1648         ret = 0;
1649 out:
1650         return ret;
1651 }
1652 
1653 /**
1654  *      velocity_init_td_ring   -       set up transmit ring
1655  *      @vptr:  velocity
1656  *
1657  *      Set up the transmit ring and chain the ring pointers together.
1658  *      Returns zero on success or a negative posix errno code for
1659  *      failure.
1660  */
1661 static int velocity_init_td_ring(struct velocity_info *vptr)
1662 {
1663         int j;
1664 
1665         /* Init the TD ring entries */
1666         for (j = 0; j < vptr->tx.numq; j++) {
1667 
1668                 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1669                                             sizeof(struct velocity_td_info),
1670                                             GFP_KERNEL);
1671                 if (!vptr->tx.infos[j]) {
1672                         while (--j >= 0)
1673                                 kfree(vptr->tx.infos[j]);
1674                         return -ENOMEM;
1675                 }
1676 
1677                 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1678         }
1679         return 0;
1680 }
1681 
1682 /**
1683  *      velocity_free_dma_rings -       free PCI ring pointers
1684  *      @vptr: Velocity to free from
1685  *
1686  *      Clean up the PCI ring buffers allocated to this velocity.
1687  */
1688 static void velocity_free_dma_rings(struct velocity_info *vptr)
1689 {
1690         const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1691                 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1692 
1693         dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1694 }
1695 
1696 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1697 {
1698         int ret;
1699 
1700         velocity_set_rxbufsize(vptr, mtu);
1701 
1702         ret = velocity_init_dma_rings(vptr);
1703         if (ret < 0)
1704                 goto out;
1705 
1706         ret = velocity_init_rd_ring(vptr);
1707         if (ret < 0)
1708                 goto err_free_dma_rings_0;
1709 
1710         ret = velocity_init_td_ring(vptr);
1711         if (ret < 0)
1712                 goto err_free_rd_ring_1;
1713 out:
1714         return ret;
1715 
1716 err_free_rd_ring_1:
1717         velocity_free_rd_ring(vptr);
1718 err_free_dma_rings_0:
1719         velocity_free_dma_rings(vptr);
1720         goto out;
1721 }
1722 
1723 /**
1724  *      velocity_free_tx_buf    -       free transmit buffer
1725  *      @vptr: velocity
1726  *      @tdinfo: buffer
     *      @td: transmit descriptor for this buffer
1727  *
1728  *      Release a transmit buffer. If the buffer was preallocated then
1729  *      recycle it, if not then unmap the buffer.
1730  */
1731 static void velocity_free_tx_buf(struct velocity_info *vptr,
1732                 struct velocity_td_info *tdinfo, struct tx_desc *td)
1733 {
1734         struct sk_buff *skb = tdinfo->skb;
1735 
1736         /*
1737          *      Don't unmap the pre-allocated tx_bufs
1738          */
1739         if (tdinfo->skb_dma) {
1740                 int i;
1741 
1742                 for (i = 0; i < tdinfo->nskb_dma; i++) {
1743                         size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1744 
1745                         /* For scatter-gather */
1746                         if (skb_shinfo(skb)->nr_frags > 0)
1747                                 pktlen = max_t(size_t, pktlen,
1748                                                 le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1749 
1750                         dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1751                                         pktlen, DMA_TO_DEVICE);
1752                 }
1753         }
1754         dev_kfree_skb_irq(skb);
1755         tdinfo->skb = NULL;
1756 }
1757 
1758 /*
1759  *      FIXME: could we merge this with velocity_free_tx_buf ?
1760  */
1761 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1762                                                          int q, int n)
1763 {
1764         struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1765         int i;
1766 
1767         if (td_info == NULL)
1768                 return;
1769 
1770         if (td_info->skb) {
1771                 for (i = 0; i < td_info->nskb_dma; i++) {
1772                         if (td_info->skb_dma[i]) {
1773                                 dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1774                                         td_info->skb->len, DMA_TO_DEVICE);
1775                                 td_info->skb_dma[i] = 0;
1776                         }
1777                 }
1778                 dev_kfree_skb(td_info->skb);
1779                 td_info->skb = NULL;
1780         }
1781 }
1782 
1783 /**
1784  *      velocity_free_td_ring   -       free td ring
1785  *      @vptr: velocity
1786  *
1787  *      Free up the transmit ring for this particular velocity adapter.
1788  *      We free the ring contents but not the ring itself.
1789  */
1790 static void velocity_free_td_ring(struct velocity_info *vptr)
1791 {
1792         int i, j;
1793 
1794         for (j = 0; j < vptr->tx.numq; j++) {
1795                 if (vptr->tx.infos[j] == NULL)
1796                         continue;
1797                 for (i = 0; i < vptr->options.numtx; i++)
1798                         velocity_free_td_ring_entry(vptr, j, i);
1799 
1800                 kfree(vptr->tx.infos[j]);
1801                 vptr->tx.infos[j] = NULL;
1802         }
1803 }
1804 
1805 static void velocity_free_rings(struct velocity_info *vptr)
1806 {
1807         velocity_free_td_ring(vptr);
1808         velocity_free_rd_ring(vptr);
1809         velocity_free_dma_rings(vptr);
1810 }
1811 
1812 /**
1813  *      velocity_error  -       handle error from controller
1814  *      @vptr: velocity
1815  *      @status: card status
1816  *
1817  *      Process an error report from the hardware and attempt to recover
1818  *      the card itself. At the moment we cannot recover from some
1819  *      theoretically impossible errors but this could be fixed using
1820  *      the pci_device_failed logic to bounce the hardware.
1821  *
1822  */
1823 static void velocity_error(struct velocity_info *vptr, int status)
1824 {
1825 
1826         if (status & ISR_TXSTLI) {
1827                 struct mac_regs __iomem *regs = vptr->mac_regs;
1828 
1829                 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1830                 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1831                 writew(TRDCSR_RUN, &regs->TDCSRClr);
1832                 netif_stop_queue(vptr->netdev);
1833 
1834                 /* FIXME: port over the pci_device_failed code and use it
1835                    here */
1836         }
1837 
1838         if (status & ISR_SRCI) {
1839                 struct mac_regs __iomem *regs = vptr->mac_regs;
1840                 int linked;
1841 
1842                 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1843                         vptr->mii_status = check_connection_type(regs);
1844 
1845                         /*
1846                          *      If it is a 3119, disable frame bursting in
1847                          *      half-duplex mode and enable it in full-duplex
1848                          *      mode.
1849                          */
1850                         if (vptr->rev_id < REV_ID_VT3216_A0) {
1851                                 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1852                                         BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1853                                 else
1854                                         BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1855                         }
1856                         /*
1857                          *      Only enable CD heart beat counter in 10HD mode
1858                          */
1859                         if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1860                                 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1861                         else
1862                                 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1863 
1864                         setup_queue_timers(vptr);
1865                 }
1866                 /*
1867                  *      Get link status from PHYSR0
1868                  */
1869                 linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1870 
1871                 if (linked) {
1872                         vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1873                         netif_carrier_on(vptr->netdev);
1874                 } else {
1875                         vptr->mii_status |= VELOCITY_LINK_FAIL;
1876                         netif_carrier_off(vptr->netdev);
1877                 }
1878 
1879                 velocity_print_link_status(vptr);
1880                 enable_flow_control_ability(vptr);
1881 
1882                 /*
1883                  *      Re-enable auto-polling because SRCI will disable
1884                  *      auto-polling
1885                  */
1886 
1887                 enable_mii_autopoll(regs);
1888 
1889                 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1890                         netif_stop_queue(vptr->netdev);
1891                 else
1892                         netif_wake_queue(vptr->netdev);
1893 
1894         }
1895         if (status & ISR_MIBFI)
1896                 velocity_update_hw_mibs(vptr);
1897         if (status & ISR_LSTEI)
1898                 mac_rx_queue_wake(vptr->mac_regs);
1899 }
1900 
1901 /**
1902  *      velocity_tx_srv         -       transmit interrupt service
1903  *      @vptr: velocity
1904  *
1905  *      Scan the queues looking for transmitted packets that
1906  *      we can complete and clean up. Update any statistics as
1907  *      necessary.
1908  */
1909 static int velocity_tx_srv(struct velocity_info *vptr)
1910 {
1911         struct tx_desc *td;
1912         int qnum;
1913         int full = 0;
1914         int idx;
1915         int works = 0;
1916         struct velocity_td_info *tdinfo;
1917         struct net_device_stats *stats = &vptr->netdev->stats;
1918 
1919         for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1920                 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1921                         idx = (idx + 1) % vptr->options.numtx) {
1922 
1923                         /*
1924                          *      Get Tx Descriptor
1925                          */
1926                         td = &(vptr->tx.rings[qnum][idx]);
1927                         tdinfo = &(vptr->tx.infos[qnum][idx]);
1928 
1929                         if (td->tdesc0.len & OWNED_BY_NIC)
1930                                 break;
1931 
1932                         if (works++ > 15)
1933                                 break;
1934 
1935                         if (td->tdesc0.TSR & TSR0_TERR) {
1936                                 stats->tx_errors++;
1937                                 stats->tx_dropped++;
1938                                 if (td->tdesc0.TSR & TSR0_CDH)
1939                                         stats->tx_heartbeat_errors++;
1940                                 if (td->tdesc0.TSR & TSR0_CRS)
1941                                         stats->tx_carrier_errors++;
1942                                 if (td->tdesc0.TSR & TSR0_ABT)
1943                                         stats->tx_aborted_errors++;
1944                                 if (td->tdesc0.TSR & TSR0_OWC)
1945                                         stats->tx_window_errors++;
1946                         } else {
1947                                 stats->tx_packets++;
1948                                 stats->tx_bytes += tdinfo->skb->len;
1949                         }
1950                         velocity_free_tx_buf(vptr, tdinfo, td);
1951                         vptr->tx.used[qnum]--;
1952                 }
1953                 vptr->tx.tail[qnum] = idx;
1954 
1955                 if (AVAIL_TD(vptr, qnum) < 1)
1956                         full = 1;
1957         }
1958         /*
1959          *      Look to see if we should kick the transmit network
1960          *      layer for more work.
1961          */
1962         if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1963             (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1964                 netif_wake_queue(vptr->netdev);
1965         }
1966         return works;
1967 }
1968 
1969 /**
1970  *      velocity_rx_csum        -       checksum process
1971  *      @rd: receive packet descriptor
1972  *      @skb: network layer packet buffer
1973  *
1974  *      Process the status bits for the received packet and determine
1975  *      if the checksum was computed and verified by the hardware
1976  */
1977 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1978 {
1979         skb_checksum_none_assert(skb);
1980 
1981         if (rd->rdesc1.CSM & CSM_IPKT) {
1982                 if (rd->rdesc1.CSM & CSM_IPOK) {
1983                         if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1984                                         (rd->rdesc1.CSM & CSM_UDPKT)) {
1985                                 if (!(rd->rdesc1.CSM & CSM_TUPOK))
1986                                         return;
1987                         }
1988                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1989                 }
1990         }
1991 }
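
/*
 * Decision summary (editor's illustration of the logic above):
 *
 *	non-IP frame                          -> CHECKSUM_NONE
 *	IP, bad IP checksum                   -> CHECKSUM_NONE
 *	IP ok, TCP/UDP with bad L4 checksum   -> CHECKSUM_NONE
 *	IP ok, not TCP/UDP                    -> CHECKSUM_UNNECESSARY
 *	IP ok, TCP/UDP checksum ok            -> CHECKSUM_UNNECESSARY
 */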
1992 
1993 /**
1994  *      velocity_rx_copy        -       in place Rx copy for small packets
1995  *      @rx_skb: network layer packet buffer candidate
1996  *      @pkt_size: received data size
1997  *      @vptr: velocity adapter
1999  *
2000  *      Replace the current skb that is scheduled for Rx processing by a
2001  *      shorter, immediately allocated skb, if the received packet is small
2002  *      enough. This function returns a negative value if the received
2003  *      packet is too big or if memory is exhausted.
2004  */
2005 static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2006                             struct velocity_info *vptr)
2007 {
2008         int ret = -1;
2009         if (pkt_size < rx_copybreak) {
2010                 struct sk_buff *new_skb;
2011 
2012                 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2013                 if (new_skb) {
2014                         new_skb->ip_summed = rx_skb[0]->ip_summed;
2015                         skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2016                         *rx_skb = new_skb;
2017                         ret = 0;
2018                 }
2019 
2020         }
2021         return ret;
2022 }
2023 
2024 /**
2025  *      velocity_iph_realign    -       IP header alignment
2026  *      @vptr: velocity we are handling
2027  *      @skb: network layer packet buffer
2028  *      @pkt_size: received data size
2029  *
2030  *      Shift the frame so the IP header lands on a 4-byte boundary.
2031  *      This behavior can be configured by the user.
2032  */
2033 static inline void velocity_iph_realign(struct velocity_info *vptr,
2034                                         struct sk_buff *skb, int pkt_size)
2035 {
2036         if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2037                 memmove(skb->data + 2, skb->data, pkt_size);
2038                 skb_reserve(skb, 2);
2039         }
2040 }
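
/*
 * Editor's illustration: receive buffers are 64-byte aligned, so the
 * IP header behind the 14-byte Ethernet header starts at offset 14
 * (2 mod 4). Copying the frame up by two bytes and reserving two
 * moves it to offset 16, a 4-byte boundary, at the cost of one
 * memmove per packet.
 */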
2041 
2042 /**
2043  *      velocity_receive_frame  -       received packet processor
2044  *      @vptr: velocity we are handling
2045  *      @idx: ring index
2046  *
2047  *      A packet has arrived. We process the packet and if appropriate
2048  *      pass the frame up the network stack
2049  */
2050 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2051 {
2052         struct net_device_stats *stats = &vptr->netdev->stats;
2053         struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2054         struct rx_desc *rd = &(vptr->rx.ring[idx]);
2055         int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2056         struct sk_buff *skb;
2057 
2058         if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2059                 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2060                 stats->rx_length_errors++;
2061                 return -EINVAL;
2062         }
2063 
2064         if (rd->rdesc0.RSR & RSR_MAR)
2065                 stats->multicast++;
2066 
2067         skb = rd_info->skb;
2068 
2069         dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2070                                     vptr->rx.buf_sz, DMA_FROM_DEVICE);
2071 
2072         /*
2073          *      Drop frame not meeting IEEE 802.3
2074          */
2075 
2076         if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2077                 if (rd->rdesc0.RSR & RSR_RL) {
2078                         stats->rx_length_errors++;
2079                         return -EINVAL;
2080                 }
2081         }
2082 
2083         velocity_rx_csum(rd, skb);
2084 
2085         if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2086                 velocity_iph_realign(vptr, skb, pkt_len);
2087                 rd_info->skb = NULL;
2088                 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2089                                  DMA_FROM_DEVICE);
2090         } else {
2091                 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2092                                            vptr->rx.buf_sz, DMA_FROM_DEVICE);
2093         }
2094 
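        /* rdesc0.len includes the trailing 4-byte FCS, hence the "- 4" (editor's note) */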
2095         skb_put(skb, pkt_len - 4);
2096         skb->protocol = eth_type_trans(skb, vptr->netdev);
2097 
2098         if (rd->rdesc0.RSR & RSR_DETAG) {
2099                 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2100 
2101                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2102         }
2103         netif_receive_skb(skb);
2104 
2105         stats->rx_bytes += pkt_len;
2106         stats->rx_packets++;
2107 
2108         return 0;
2109 }
2110 
2111 /**
2112  *      velocity_rx_srv         -       service RX interrupt
2113  *      @vptr: velocity
     *      @budget_left: remaining NAPI budget
2114  *
2115  *      Walk the receive ring of the velocity adapter and remove
2116  *      any received packets from the receive queue. Hand the ring
2117  *      slots back to the adapter for reuse.
2118  */
2119 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2120 {
2121         struct net_device_stats *stats = &vptr->netdev->stats;
2122         int rd_curr = vptr->rx.curr;
2123         int works = 0;
2124 
2125         while (works < budget_left) {
2126                 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2127 
2128                 if (!vptr->rx.info[rd_curr].skb)
2129                         break;
2130 
2131                 if (rd->rdesc0.len & OWNED_BY_NIC)
2132                         break;
2133 
2134                 rmb();
2135 
2136                 /*
2137                  *      Don't drop frames with CE or RL errors even though RXOK is off
2138                  */
2139                 if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2140                         if (velocity_receive_frame(vptr, rd_curr) < 0)
2141                                 stats->rx_dropped++;
2142                 } else {
2143                         if (rd->rdesc0.RSR & RSR_CRC)
2144                                 stats->rx_crc_errors++;
2145                         if (rd->rdesc0.RSR & RSR_FAE)
2146                                 stats->rx_frame_errors++;
2147 
2148                         stats->rx_dropped++;
2149                 }
2150 
2151                 rd->size |= RX_INTEN;
2152 
2153                 rd_curr++;
2154                 if (rd_curr >= vptr->options.numrx)
2155                         rd_curr = 0;
2156                 works++;
2157         }
2158 
2159         vptr->rx.curr = rd_curr;
2160 
2161         if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2162                 velocity_give_many_rx_descs(vptr);
2163 
2164         VAR_USED(stats);
2165         return works;
2166 }
2167 
2168 static int velocity_poll(struct napi_struct *napi, int budget)
2169 {
2170         struct velocity_info *vptr = container_of(napi,
2171                         struct velocity_info, napi);
2172         unsigned int rx_done;
2173         unsigned long flags;
2174 
2175         /*
2176          * Service rx, then tx, per poll (the VIA out-of-tree driver
2177          * ran each twice for performance).
2178          */
2179         rx_done = velocity_rx_srv(vptr, budget);
2180         spin_lock_irqsave(&vptr->lock, flags);
2181         velocity_tx_srv(vptr);
2182         /* If budget not fully consumed, exit the polling mode */
2183         if (rx_done < budget) {
2184                 napi_complete(napi);
2185                 mac_enable_int(vptr->mac_regs);
2186         }
2187         spin_unlock_irqrestore(&vptr->lock, flags);
2188 
2189         return rx_done;
2190 }
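
/*
 * Editor's note: receive processing runs outside vptr->lock, since the
 * RX ring is only ever touched from the poll routine; transmit
 * completion and the re-enabling of interrupts are done under the lock
 * so they serialize with velocity_xmit() and the interrupt handler.
 */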
2191 
2192 /**
2193  *      velocity_intr           -       interrupt callback
2194  *      @irq: interrupt number
2195  *      @dev_instance: interrupting device
2196  *
2197  *      Called whenever an interrupt is generated by the velocity
2198  *      adapter IRQ line. The line may be shared, so we must first
2199  *      identify whether we are the source of the interrupt, and if
2200  *      not, exit as efficiently as possible.
2201  */
2202 static irqreturn_t velocity_intr(int irq, void *dev_instance)
2203 {
2204         struct net_device *dev = dev_instance;
2205         struct velocity_info *vptr = netdev_priv(dev);
2206         u32 isr_status;
2207 
2208         spin_lock(&vptr->lock);
2209         isr_status = mac_read_isr(vptr->mac_regs);
2210 
2211         /* Not us ? */
2212         if (isr_status == 0) {
2213                 spin_unlock(&vptr->lock);
2214                 return IRQ_NONE;
2215         }
2216 
2217         /* Ack the interrupt */
2218         mac_write_isr(vptr->mac_regs, isr_status);
2219 
2220         if (likely(napi_schedule_prep(&vptr->napi))) {
2221                 mac_disable_int(vptr->mac_regs);
2222                 __napi_schedule(&vptr->napi);
2223         }
2224 
2225         if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2226                 velocity_error(vptr, isr_status);
2227 
2228         spin_unlock(&vptr->lock);
2229 
2230         return IRQ_HANDLED;
2231 }
2232 
2233 /**
2234  *      velocity_open           -       interface activation callback
2235  *      @dev: network layer device to open
2236  *
2237  *      Called when the network layer brings the interface up. Returns
2238  *      a negative posix error code on failure, or zero on success.
2239  *
2240  *      All the ring allocation and set up is done on open for this
2241  *      adapter to minimise memory usage when inactive
2242  */
2243 static int velocity_open(struct net_device *dev)
2244 {
2245         struct velocity_info *vptr = netdev_priv(dev);
2246         int ret;
2247 
2248         ret = velocity_init_rings(vptr, dev->mtu);
2249         if (ret < 0)
2250                 goto out;
2251 
2252         /* Ensure chip is running */
2253         velocity_set_power_state(vptr, PCI_D0);
2254 
2255         velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2256 
2257         ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2258                           dev->name, dev);
2259         if (ret < 0) {
2260                 /* Power down the chip */
2261                 velocity_set_power_state(vptr, PCI_D3hot);
2262                 velocity_free_rings(vptr);
2263                 goto out;
2264         }
2265 
2266         velocity_give_many_rx_descs(vptr);
2267 
2268         mac_enable_int(vptr->mac_regs);
2269         netif_start_queue(dev);
2270         napi_enable(&vptr->napi);
2271         vptr->flags |= VELOCITY_FLAGS_OPENED;
2272 out:
2273         return ret;
2274 }
2275 
2276 /**
2277  *      velocity_shutdown       -       shut down the chip
2278  *      @vptr: velocity to deactivate
2279  *
2280  *      Shuts down the internal operations of the velocity and
2281  *      disables interrupts, autopolling, transmit and receive
2282  */
2283 static void velocity_shutdown(struct velocity_info *vptr)
2284 {
2285         struct mac_regs __iomem *regs = vptr->mac_regs;
2286         mac_disable_int(regs);
2287         writel(CR0_STOP, &regs->CR0Set);
2288         writew(0xFFFF, &regs->TDCSRClr);
2289         writeb(0xFF, &regs->RDCSRClr);
2290         safe_disable_mii_autopoll(regs);
2291         mac_clear_isr(regs);
2292 }
2293 
2294 /**
2295  *      velocity_change_mtu     -       MTU change callback
2296  *      @dev: network device
2297  *      @new_mtu: desired MTU
2298  *
2299  *      Handle requests from the networking layer for MTU change on
2300  *      this interface. It gets called on a change by the network layer.
2301  *      Return zero for success or negative posix error code.
2302  */
2303 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2304 {
2305         struct velocity_info *vptr = netdev_priv(dev);
2306         int ret = 0;
2307 
2308         if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2309                 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2310                                 vptr->netdev->name);
2311                 ret = -EINVAL;
2312                 goto out_0;
2313         }
2314 
2315         if (!netif_running(dev)) {
2316                 dev->mtu = new_mtu;
2317                 goto out_0;
2318         }
2319 
2320         if (dev->mtu != new_mtu) {
2321                 struct velocity_info *tmp_vptr;
2322                 unsigned long flags;
2323                 struct rx_info rx;
2324                 struct tx_info tx;
2325 
2326                 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2327                 if (!tmp_vptr) {
2328                         ret = -ENOMEM;
2329                         goto out_0;
2330                 }
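
                /*
                 * Editor's note: a complete replacement ring set is
                 * built in tmp_vptr first and swapped with the live one
                 * under the lock; the old rings are then freed outside
                 * the critical section, and an allocation failure here
                 * leaves the running device untouched.
                 */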
2331 
2332                 tmp_vptr->netdev = dev;
2333                 tmp_vptr->pdev = vptr->pdev;
2334                 tmp_vptr->dev = vptr->dev;
2335                 tmp_vptr->options = vptr->options;
2336                 tmp_vptr->tx.numq = vptr->tx.numq;
2337 
2338                 ret = velocity_init_rings(tmp_vptr, new_mtu);
2339                 if (ret < 0)
2340                         goto out_free_tmp_vptr_1;
2341 
2342                 napi_disable(&vptr->napi);
2343 
2344                 spin_lock_irqsave(&vptr->lock, flags);
2345 
2346                 netif_stop_queue(dev);
2347                 velocity_shutdown(vptr);
2348 
2349                 rx = vptr->rx;
2350                 tx = vptr->tx;
2351 
2352                 vptr->rx = tmp_vptr->rx;
2353                 vptr->tx = tmp_vptr->tx;
2354 
2355                 tmp_vptr->rx = rx;
2356                 tmp_vptr->tx = tx;
2357 
2358                 dev->mtu = new_mtu;
2359 
2360                 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2361 
2362                 velocity_give_many_rx_descs(vptr);
2363 
2364                 napi_enable(&vptr->napi);
2365 
2366                 mac_enable_int(vptr->mac_regs);
2367                 netif_start_queue(dev);
2368 
2369                 spin_unlock_irqrestore(&vptr->lock, flags);
2370 
2371                 velocity_free_rings(tmp_vptr);
2372 
2373 out_free_tmp_vptr_1:
2374                 kfree(tmp_vptr);
2375         }
2376 out_0:
2377         return ret;
2378 }
2379 
2380 #ifdef CONFIG_NET_POLL_CONTROLLER
2381 /**
2382  *  velocity_poll_controller            -       Velocity Poll controller function
2383  *  @dev: network device
2384  *
2385  *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2386  *  with interrupts disabled.
2388  */
2389 static void velocity_poll_controller(struct net_device *dev)
2390 {
2391         disable_irq(dev->irq);
2392         velocity_intr(dev->irq, dev);
2393         enable_irq(dev->irq);
2394 }
2395 #endif
2396 
2397 /**
2398  *      velocity_mii_ioctl              -       MII ioctl handler
2399  *      @dev: network device
2400  *      @ifr: the ifreq block for the ioctl
2401  *      @cmd: the command
2402  *
2403  *      Process MII requests made via ioctl from the network layer. These
2404  *      are used by tools like kudzu to interrogate the link state of the
2405  *      hardware
2406  */
2407 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2408 {
2409         struct velocity_info *vptr = netdev_priv(dev);
2410         struct mac_regs __iomem *regs = vptr->mac_regs;
2411         unsigned long flags;
2412         struct mii_ioctl_data *miidata = if_mii(ifr);
2413         int err;
2414 
2415         switch (cmd) {
2416         case SIOCGMIIPHY:
2417                 miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2418                 break;
2419         case SIOCGMIIREG:
2420                 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2421                         return -ETIMEDOUT;
2422                 break;
2423         case SIOCSMIIREG:
2424                 spin_lock_irqsave(&vptr->lock, flags);
2425                 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2426                 spin_unlock_irqrestore(&vptr->lock, flags);
2427                 check_connection_type(vptr->mac_regs);
2428                 if (err)
2429                         return err;
2430                 break;
2431         default:
2432                 return -EOPNOTSUPP;
2433         }
2434         return 0;
2435 }
2436 
2437 /**
2438  *      velocity_ioctl          -       ioctl entry point
2439  *      @dev: network device
2440  *      @rq: interface request ioctl
2441  *      @cmd: command code
2442  *
2443  *      Called when the user issues an ioctl request to the network
2444  *      device in question. The velocity interface supports MII.
2445  */
2446 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2447 {
2448         struct velocity_info *vptr = netdev_priv(dev);
2449         int ret;
2450 
2451         /* If we are asked for information and the device is power
2452            saving then we need to bring the device back up to talk to it */
2453 
2454         if (!netif_running(dev))
2455                 velocity_set_power_state(vptr, PCI_D0);
2456 
2457         switch (cmd) {
2458         case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2459         case SIOCGMIIREG:       /* Read MII PHY register. */
2460         case SIOCSMIIREG:       /* Write to MII PHY register. */
2461                 ret = velocity_mii_ioctl(dev, rq, cmd);
2462                 break;
2463 
2464         default:
2465                 ret = -EOPNOTSUPP;
2466         }
2467         if (!netif_running(dev))
2468                 velocity_set_power_state(vptr, PCI_D3hot);
2469 
2471         return ret;
2472 }
2473 
2474 /**
2475  *      velocity_get_stats      -       statistics callback
2476  *      @dev: network device
2477  *
2478  *      Callback from the network layer to allow driver statistics
2479  *      to be resynchronized with hardware collected state. In the
2480  *      case of the velocity we need to pull the MIB counters from
2481  *      the hardware into the counters before letting the network
2482  *      layer display them.
2483  */
2484 static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2485 {
2486         struct velocity_info *vptr = netdev_priv(dev);
2487 
2488         /* If the hardware is down, don't touch MII */
2489         if (!netif_running(dev))
2490                 return &dev->stats;
2491 
2492         spin_lock_irq(&vptr->lock);
2493         velocity_update_hw_mibs(vptr);
2494         spin_unlock_irq(&vptr->lock);
2495 
2496         dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2497         dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2498         dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2499 
2500         dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2501         dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2502 
2503         /*
2504          * Not filled in from the hardware MIBs: rx_dropped (no space in
2505          * linux buffers), rx_over_errors (receiver ring buffer overflow),
2506          * rx_frame_errors (received frame alignment error), rx_fifo_errors
2507          * (receiver fifo overrun), rx_missed_errors (receiver missed
2508          * packet) and the detailed tx_errors such as tx_fifo_errors.
2509          */
2512 
2513         return &dev->stats;
2514 }
2515 
2516 /**
2517  *      velocity_close          -       close adapter callback
2518  *      @dev: network device
2519  *
2520  *      Callback from the network layer when the velocity is being
2521  *      deactivated by the network layer
2522  */
2523 static int velocity_close(struct net_device *dev)
2524 {
2525         struct velocity_info *vptr = netdev_priv(dev);
2526 
2527         napi_disable(&vptr->napi);
2528         netif_stop_queue(dev);
2529         velocity_shutdown(vptr);
2530 
2531         if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2532                 velocity_get_ip(vptr);
2533 
2534         free_irq(dev->irq, dev);
2535 
2536         velocity_free_rings(vptr);
2537 
2538         vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2539         return 0;
2540 }
2541 
2542 /**
2543  *      velocity_xmit           -       transmit packet callback
2544  *      @skb: buffer to transmit
2545  *      @dev: network device
2546  *
2547  *      Called by the network layer to request that a packet be queued to
2548  *      the velocity. Always returns NETDEV_TX_OK.
2549  */
2550 static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2551                                  struct net_device *dev)
2552 {
2553         struct velocity_info *vptr = netdev_priv(dev);
2554         int qnum = 0;
2555         struct tx_desc *td_ptr;
2556         struct velocity_td_info *tdinfo;
2557         unsigned long flags;
2558         int pktlen;
2559         int index, prev;
2560         int i = 0;
2561 
2562         if (skb_padto(skb, ETH_ZLEN))
2563                 goto out;
2564 
2565         /* The hardware can handle at most 7 memory segments, so merge
2566          * the skb if there are more */
2567         if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2568                 dev_kfree_skb_any(skb);
2569                 return NETDEV_TX_OK;
2570         }
2571 
2572         pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2573                         max_t(unsigned int, skb->len, ETH_ZLEN) :
2574                                 skb_headlen(skb);
2575 
2576         spin_lock_irqsave(&vptr->lock, flags);
2577 
2578         index = vptr->tx.curr[qnum];
2579         td_ptr = &(vptr->tx.rings[qnum][index]);
2580         tdinfo = &(vptr->tx.infos[qnum][index]);
2581 
2582         td_ptr->tdesc1.TCR = TCR0_TIC;
2583         td_ptr->td_buf[0].size &= ~TD_QUEUE;
2584 
2585         /*
2586          *      Map the linear network buffer into PCI space and
2587          *      add it to the transmit ring.
2588          */
2589         tdinfo->skb = skb;
2590         tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2591                                                                 DMA_TO_DEVICE);
2592         td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2593         td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2594         td_ptr->td_buf[0].pa_high = 0;
2595         td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2596 
2597         /* Handle fragments */
2598         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2599                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2600 
2601                 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2602                                                           frag, 0,
2603                                                           skb_frag_size(frag),
2604                                                           DMA_TO_DEVICE);
2605 
2606                 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2607                 td_ptr->td_buf[i + 1].pa_high = 0;
2608                 td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2609         }
2610         tdinfo->nskb_dma = i + 1;
2611 
2612         td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2613 
2614         if (vlan_tx_tag_present(skb)) {
2615                 td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2616                 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2617         }
2618 
2619         /*
2620          *      Handle hardware checksum
2621          */
2622         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2623                 const struct iphdr *ip = ip_hdr(skb);
2624                 if (ip->protocol == IPPROTO_TCP)
2625                         td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2626                 else if (ip->protocol == IPPROTO_UDP)
2627                         td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2628                 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2629         }
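
        /*
         * Editor's note on ordering: the new descriptor is handed to the
         * NIC (OWNED_BY_NIC) before TD_QUEUE is set on the previous
         * descriptor below, so the hardware never chains into a slot it
         * does not yet own.
         */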
2630 
2631         prev = index - 1;
2632         if (prev < 0)
2633                 prev = vptr->options.numtx - 1;
2634         td_ptr->tdesc0.len |= OWNED_BY_NIC;
2635         vptr->tx.used[qnum]++;
2636         vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2637 
2638         if (AVAIL_TD(vptr, qnum) < 1)
2639                 netif_stop_queue(dev);
2640 
2641         td_ptr = &(vptr->tx.rings[qnum][prev]);
2642         td_ptr->td_buf[0].size |= TD_QUEUE;
2643         mac_tx_queue_wake(vptr->mac_regs, qnum);
2644 
2645         spin_unlock_irqrestore(&vptr->lock, flags);
2646 out:
2647         return NETDEV_TX_OK;
2648 }
2649 
2650 static const struct net_device_ops velocity_netdev_ops = {
2651         .ndo_open               = velocity_open,
2652         .ndo_stop               = velocity_close,
2653         .ndo_start_xmit         = velocity_xmit,
2654         .ndo_get_stats          = velocity_get_stats,
2655         .ndo_validate_addr      = eth_validate_addr,
2656         .ndo_set_mac_address    = eth_mac_addr,
2657         .ndo_set_rx_mode        = velocity_set_multi,
2658         .ndo_change_mtu         = velocity_change_mtu,
2659         .ndo_do_ioctl           = velocity_ioctl,
2660         .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2661         .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2662 #ifdef CONFIG_NET_POLL_CONTROLLER
2663         .ndo_poll_controller = velocity_poll_controller,
2664 #endif
2665 };
2666 
2667 /**
2668  *      velocity_init_info      -       init private data
2669  *      @vptr: Velocity info
2670  *      @info: Board type
2672  *
2673  *      Set up the initial velocity_info struct for the device that has been
2674  *      discovered.
2675  */
2676 static void velocity_init_info(struct velocity_info *vptr,
2677                                 const struct velocity_info_tbl *info)
2678 {
2679         vptr->chip_id = info->chip_id;
2680         vptr->tx.numq = info->txqueue;
2681         vptr->multicast_limit = MCAM_SIZE;
2682         spin_lock_init(&vptr->lock);
2683 }
2684 
2685 /**
2686  *      velocity_get_pci_info   -       retrieve PCI info for device
2687  *      @vptr: velocity device
2689  *
2690  *      Retrieve the PCI configuration space data that interests us from
2691  *      the kernel PCI layer
2692  */
2693 static int velocity_get_pci_info(struct velocity_info *vptr)
2694 {
2695         struct pci_dev *pdev = vptr->pdev;
2696 
2697         pci_set_master(pdev);
2698 
2699         vptr->ioaddr = pci_resource_start(pdev, 0);
2700         vptr->memaddr = pci_resource_start(pdev, 1);
2701 
2702         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2703                 dev_err(&pdev->dev,
2704                            "region #0 is not an I/O resource, aborting.\n");
2705                 return -EINVAL;
2706         }
2707 
2708         if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2709                 dev_err(&pdev->dev,
2710                            "region #1 is an I/O resource, aborting.\n");
2711                 return -EINVAL;
2712         }
2713 
2714         if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2715                 dev_err(&pdev->dev, "region #1 is too small.\n");
2716                 return -EINVAL;
2717         }
2718 
2719         return 0;
2720 }
2721 
2722 /**
2723  *      velocity_get_platform_info - retrieve platform info for device
2724  *      @vptr: velocity device
2726  *
2727  *      Retrieve the Platform configuration data that interests us
2728  */
2729 static int velocity_get_platform_info(struct velocity_info *vptr)
2730 {
2731         struct resource res;
2732         int ret;
2733 
2734         if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2735                 vptr->no_eeprom = 1;
2736 
2737         ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2738         if (ret) {
2739                 dev_err(vptr->dev, "unable to find memory address\n");
2740                 return ret;
2741         }
2742 
2743         vptr->memaddr = res.start;
2744 
2745         if (resource_size(&res) < VELOCITY_IO_SIZE) {
2746                 dev_err(vptr->dev, "memory region is too small.\n");
2747                 return -EINVAL;
2748         }
2749 
2750         return 0;
2751 }
2752 
2753 /**
2754  *      velocity_print_info     -       per driver data
2755  *      @vptr: velocity
2756  *
2757  *      Print per driver data as the kernel driver finds Velocity
2758  *      hardware
2759  */
2760 static void velocity_print_info(struct velocity_info *vptr)
2761 {
2762         struct net_device *dev = vptr->netdev;
2763 
2764         printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2765         printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2766                 dev->name, dev->dev_addr);
2767 }
2768 
2769 static u32 velocity_get_link(struct net_device *dev)
2770 {
2771         struct velocity_info *vptr = netdev_priv(dev);
2772         struct mac_regs __iomem *regs = vptr->mac_regs;
2773         return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2774 }
2775 
2776 /**
2777  *      velocity_probe - set up discovered velocity device
2778  *      @dev: generic device to configure
2779  *      @irq: interrupt line assigned to the device
2780  *      @info: chip information table entry that matched
     *      @bustype: bus that device is connected to
2781  *
2782  *      Configure a discovered adapter from scratch. Return a negative
2783  *      errno error code on failure paths.
2784  */
2785 static int velocity_probe(struct device *dev, int irq,
2786                            const struct velocity_info_tbl *info,
2787                            enum velocity_bus_type bustype)
2788 {
2789         static int first = 1;
2790         struct net_device *netdev;
2791         int i;
2792         const char *drv_string;
2793         struct velocity_info *vptr;
2794         struct mac_regs __iomem *regs;
2795         int ret = -ENOMEM;
2796 
2797         /* FIXME: this driver, like almost all other ethernet drivers,
2798          * can support more than MAX_UNITS.
2799          */
2800         if (velocity_nics >= MAX_UNITS) {
2801                 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2802                 return -ENODEV;
2803         }
2804 
2805         netdev = alloc_etherdev(sizeof(struct velocity_info));
2806         if (!netdev)
2807                 goto out;
2808 
2809         /* Chain it all together */
2810 
2811         SET_NETDEV_DEV(netdev, dev);
2812         vptr = netdev_priv(netdev);
2813 
2814         if (first) {
2815                 printk(KERN_INFO "%s Ver. %s\n",
2816                         VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2817                 printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2818                 printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2819                 first = 0;
2820         }
2821 
2822         netdev->irq = irq;
2823         vptr->netdev = netdev;
2824         vptr->dev = dev;
2825 
2826         velocity_init_info(vptr, info);
2827 
2828         if (bustype == BUS_PCI) {
2829                 vptr->pdev = to_pci_dev(dev);
2830 
2831                 ret = velocity_get_pci_info(vptr);
2832                 if (ret < 0)
2833                         goto err_free_dev;
2834         } else {
2835                 vptr->pdev = NULL;
2836                 ret = velocity_get_platform_info(vptr);
2837                 if (ret < 0)
2838                         goto err_free_dev;
2839         }
2840 
2841         regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2842         if (regs == NULL) {
2843                 ret = -EIO;
2844                 goto err_free_dev;
2845         }
2846 
2847         vptr->mac_regs = regs;
2848         vptr->rev_id = readb(&regs->rev_id);
2849 
2850         mac_wol_reset(regs);
2851 
2852         for (i = 0; i < 6; i++)
2853                 netdev->dev_addr[i] = readb(&regs->PAR[i]);
2854 
2856         drv_string = dev_driver_string(dev);
2857 
2858         velocity_get_options(&vptr->options, velocity_nics, drv_string);
2859 
2860         /*
2861          *      Mask out the options that cannot be set on the chip
2862          */
2863 
2864         vptr->options.flags &= info->flags;
2865 
2866         /*
2867          *      Enable the chip-specific capabilities
2868          */
2869 
2870         vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2871 
2872         vptr->wol_opts = vptr->options.wol_opts;
2873         vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2874 
2875         vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2876 
2877         netdev->netdev_ops = &velocity_netdev_ops;
2878         netdev->ethtool_ops = &velocity_ethtool_ops;
2879         netif_napi_add(netdev, &vptr->napi, velocity_poll,
2880                                                         VELOCITY_NAPI_WEIGHT);
2881 
2882         netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2883                            NETIF_F_HW_VLAN_CTAG_TX;
2884         netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2885                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2886                         NETIF_F_IP_CSUM;
2887 
2888         ret = register_netdev(netdev);
2889         if (ret < 0)
2890                 goto err_iounmap;
2891 
2892         if (!velocity_get_link(netdev)) {
2893                 netif_carrier_off(netdev);
2894                 vptr->mii_status |= VELOCITY_LINK_FAIL;
2895         }
2896 
2897         velocity_print_info(vptr);
2898         dev_set_drvdata(vptr->dev, netdev);
2899 
2900         /* and leave the chip powered down */
2901 
2902         velocity_set_power_state(vptr, PCI_D3hot);
2903         velocity_nics++;
2904 out:
2905         return ret;
2906 
2907 err_iounmap:
2908         netif_napi_del(&vptr->napi);
2909         iounmap(regs);
2910 err_free_dev:
2911         free_netdev(netdev);
2912         goto out;
2913 }
2914 
2915 /**
2916  *      velocity_remove - device unplug
2917  *      @dev: device being removed
2918  *
2919  *      Device unload callback. Called on an unplug or on module
2920  *      unload for each active device that is present. Disconnects
2921  *      the device from the network layer and frees all the resources
2922  */
2923 static int velocity_remove(struct device *dev)
2924 {
2925         struct net_device *netdev = dev_get_drvdata(dev);
2926         struct velocity_info *vptr = netdev_priv(netdev);
2927 
2928         unregister_netdev(netdev);
2929         netif_napi_del(&vptr->napi);
2930         iounmap(vptr->mac_regs);
2931         free_netdev(netdev);
2932         velocity_nics--;
2933 
2934         return 0;
2935 }
2936 
2937 static int velocity_pci_probe(struct pci_dev *pdev,
2938                                const struct pci_device_id *ent)
2939 {
2940         const struct velocity_info_tbl *info =
2941                                         &chip_info_table[ent->driver_data];
2942         int ret;
2943 
2944         ret = pci_enable_device(pdev);
2945         if (ret < 0)
2946                 return ret;
2947 
2948         ret = pci_request_regions(pdev, VELOCITY_NAME);
2949         if (ret < 0) {
2950                 dev_err(&pdev->dev, "No PCI resources.\n");
2951                 goto fail1;
2952         }
2953 
2954         ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2955         if (ret == 0)
2956                 return 0;
2957 
2958         pci_release_regions(pdev);
2959 fail1:
2960         pci_disable_device(pdev);
2961         return ret;
2962 }
2963 
2964 static void velocity_pci_remove(struct pci_dev *pdev)
2965 {
2966         velocity_remove(&pdev->dev);
2967 
2968         pci_release_regions(pdev);
2969         pci_disable_device(pdev);
2970 }
2971 
2972 static int velocity_platform_probe(struct platform_device *pdev)
2973 {
2974         const struct of_device_id *of_id;
2975         const struct velocity_info_tbl *info;
2976         int irq;
2977 
2978         of_id = of_match_device(velocity_of_ids, &pdev->dev);
2979         if (!of_id)
2980                 return -EINVAL;
2981         info = of_id->data;
2982 
2983         irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2984         if (!irq)
2985                 return -EINVAL;
2986 
2987         return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2988 }
2989 
2990 static int velocity_platform_remove(struct platform_device *pdev)
2991 {
2992         velocity_remove(&pdev->dev);
2993 
2994         return 0;
2995 }
2996 
2997 #ifdef CONFIG_PM_SLEEP
2998 /**
2999  *      wol_calc_crc            -       WOL CRC
3000  *      @pattern: data pattern (@size is its length in 8-byte groups)
3001  *      @mask_pattern: bitmask, one bit per pattern byte
3002  *
3003  *      Compute the wake on lan crc hashes for the packet header
3004  *      we are interested in.
3005  */
3006 static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
3007 {
3008         u16 crc = 0xFFFF;
3009         u8 mask;
3010         int i, j;
3011 
3012         for (i = 0; i < size; i++) {
3013                 mask = mask_pattern[i];
3014 
3015                 /* Skip this 8-byte group if its mask byte is zero */
3016                 if (mask == 0x00)
3017                         continue;
3018 
3019                 for (j = 0; j < 8; j++) {
3020                         if ((mask & 0x01) == 0) {
3021                                 mask >>= 1;
3022                                 continue;
3023                         }
3024                         mask >>= 1;
3025                         crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3026                 }
3027         }
3028         /* Finally, invert the result once to get the correct data */
3029         crc = ~crc;
3030         return bitrev32(crc) >> 16;     /* bit-reverse the 16-bit CRC */
3031 }
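
/*
 * Worked example of the masking scheme above (illustrative, not part of
 * the upstream driver): each mask byte covers eight consecutive pattern
 * bytes, one bit per byte, and @size counts mask bytes.  A minimal
 * sketch with a hypothetical 16-byte pattern:
 *
 *      u8 pat[16];                     // wake pattern, filled elsewhere
 *      u8 mask[2] = { 0xC0, 0x01 };    // hash pat[6], pat[7] and pat[8]
 *      u16 crc = wol_calc_crc(2, pat, mask);
 *
 * Only the bytes whose mask bit is set are fed to crc_ccitt(); the rest
 * of each 8-byte group is skipped.
 */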
3032 
3033 /**
3034  *      velocity_set_wol        -       set up for wake on lan
3035  *      @vptr: velocity to set WOL status on
3036  *
3037  *      Set a card up for wake on lan either by unicast or by
3038  *      ARP packet.
3039  *
3040  *      FIXME: check static buffer is safe here
3041  */
3042 static int velocity_set_wol(struct velocity_info *vptr)
3043 {
3044         struct mac_regs __iomem *regs = vptr->mac_regs;
3045         enum speed_opt spd_dpx = vptr->options.spd_dpx;
3046         static u8 buf[256];
3047         int i;
3048 
3049         static u32 mask_pattern[2][4] = {
3050                 {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
3051                 {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
3052         };
3053 
3054         writew(0xFFFF, &regs->WOLCRClr);
3055         writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3056         writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3057 
3058         /*
3059            if (vptr->wol_opts & VELOCITY_WOL_PHY)
3060            writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3061          */
3062 
3063         if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3064                 writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3065 
3066         if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3067                 struct arp_packet *arp = (struct arp_packet *) buf;
3068                 u16 crc;
3069                 memset(buf, 0, sizeof(struct arp_packet) + 7);  /* +7 pads to the 8-byte CRC granularity */
3070 
3071                 for (i = 0; i < 4; i++)
3072                         writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3073 
3074                 arp->type = htons(ETH_P_ARP);
3075                 arp->ar_op = htons(1);  /* ARPOP_REQUEST */
3076 
3077                 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3078 
3079                 crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3080                                 (u8 *)&mask_pattern[0][0]);
3081 
3082                 writew(crc, &regs->PatternCRC[0]);
3083                 writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3084         }
3085 
3086         BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3087         BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3088 
3089         writew(0x0FFF, &regs->WOLSRClr);
3090 
3091         if (spd_dpx == SPD_DPX_1000_FULL)
3092                 goto mac_done;
3093 
3094         if (spd_dpx != SPD_DPX_AUTO)
3095                 goto advertise_done;
3096 
3097         if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3098                 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3099                         MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3100 
3101                 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3102         }
3103 
3104         if (vptr->mii_status & VELOCITY_SPEED_1000)
3105                 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3106 
3107 advertise_done:
3108         BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3109 
3110         {
3111                 u8 GCR;
3112                 GCR = readb(&regs->CHIPGCR);
3113                 GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3114                 writeb(GCR, &regs->CHIPGCR);
3115         }
3116 
3117 mac_done:
3118         BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3119         /* Turn on SWPTAG just before entering power mode */
3120         BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3121         /* Go to bed ..... */
3122         BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3123 
3124         return 0;
3125 }
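
/*
 * How the ARP wake pattern above selects bytes (a sketch of the intent,
 * assuming bit n of ByteMask word w selects frame byte w * 32 + n):
 * word 0 = 0x00203000 sets bits 12, 13 and 21, i.e. the EtherType bytes
 * (12-13) and the low byte of the ARP opcode (21); word 1 = 0x000003C0
 * sets bits 6-9, i.e. frame bytes 38-41, the ARP target IP.  Those are
 * exactly the fields written via arp->type, arp->ar_op and arp->ar_tip
 * before wol_calc_crc() runs over the masked buffer.
 */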
3126 
3127 /**
3128  *      velocity_save_context   -       save registers
3129  *      @vptr: velocity
3130  *      @context: buffer for stored context
3131  *
3132  *      Retrieve the current configuration from the velocity hardware
3133  *      and stash it in the context structure, for use by the context
3134  *      restore functions. This allows us to save things we need across
3135  *      power down states
3136  */
3137 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3138 {
3139         struct mac_regs __iomem *regs = vptr->mac_regs;
3140         u16 i;
3141         u8 __iomem *ptr = (u8 __iomem *)regs;
3142 
3143         for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3144                 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3145 
3146         for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3147                 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3148 
3149         for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3150                 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3151 
3152 }
3153 
3154 static int velocity_suspend(struct device *dev)
3155 {
3156         struct net_device *netdev = dev_get_drvdata(dev);
3157         struct velocity_info *vptr = netdev_priv(netdev);
3158         unsigned long flags;
3159 
3160         if (!netif_running(vptr->netdev))
3161                 return 0;
3162 
3163         netif_device_detach(vptr->netdev);
3164 
3165         spin_lock_irqsave(&vptr->lock, flags);
3166         if (vptr->pdev)
3167                 pci_save_state(vptr->pdev);
3168 
3169         if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3170                 velocity_get_ip(vptr);
3171                 velocity_save_context(vptr, &vptr->context);
3172                 velocity_shutdown(vptr);
3173                 velocity_set_wol(vptr);
3174                 if (vptr->pdev)
3175                         pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3176                 velocity_set_power_state(vptr, PCI_D3hot);
3177         } else {
3178                 velocity_save_context(vptr, &vptr->context);
3179                 velocity_shutdown(vptr);
3180                 if (vptr->pdev)
3181                         pci_disable_device(vptr->pdev);
3182                 velocity_set_power_state(vptr, PCI_D3hot);
3183         }
3184 
3185         spin_unlock_irqrestore(&vptr->lock, flags);
3186         return 0;
3187 }
3188 
3189 /**
3190  *      velocity_restore_context        -       restore registers
3191  *      @vptr: velocity
3192  *      @context: buffer for stored context
3193  *
3194  *      Reload the register configuration from the velocity context
3195  *      created by velocity_save_context.
3196  */
3197 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3198 {
3199         struct mac_regs __iomem *regs = vptr->mac_regs;
3200         int i;
3201         u8 __iomem *ptr = (u8 __iomem *)regs;
3202 
3203         for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3204                 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3205 
3206         /* Just skip cr0 */
3207         for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3208                 /* Clear: the CLR register sits 4 bytes above its SET twin */
3209                 writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3210                 /* Set the bits that are one in the saved value */
3211                 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3212         }
3213 
3214         for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3215                 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3216 
3217         for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3218                 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3219 
3220         for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3221                 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3222 }
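
/*
 * A sketch of what one pass of the CR1 loop above does for a saved CR1
 * value of, say, 0x5A (illustrative only; the CLR register sits 4 bytes
 * above its SET twin):
 *
 *      writeb(~0x5A, ptr + MAC_REG_CR1_SET + 4);  // clear the zero bits
 *      writeb(0x5A, ptr + MAC_REG_CR1_SET);       // then set the one bits
 *
 * Either write alone would leave stale bits from before the power
 * transition; the pair forces the register to exactly the saved value.
 */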
3223 
3224 static int velocity_resume(struct device *dev)
3225 {
3226         struct net_device *netdev = dev_get_drvdata(dev);
3227         struct velocity_info *vptr = netdev_priv(netdev);
3228         unsigned long flags;
3229         int i;
3230 
3231         if (!netif_running(vptr->netdev))
3232                 return 0;
3233 
3234         velocity_set_power_state(vptr, PCI_D0);
3235 
3236         if (vptr->pdev) {
3237                 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3238                 pci_restore_state(vptr->pdev);
3239         }
3240 
3241         mac_wol_reset(vptr->mac_regs);
3242 
3243         spin_lock_irqsave(&vptr->lock, flags);
3244         velocity_restore_context(vptr, &vptr->context);
3245         velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3246         mac_disable_int(vptr->mac_regs);
3247 
3248         velocity_tx_srv(vptr);
3249 
3250         for (i = 0; i < vptr->tx.numq; i++) {
3251                 if (vptr->tx.used[i])
3252                         mac_tx_queue_wake(vptr->mac_regs, i);
3253         }
3254 
3255         mac_enable_int(vptr->mac_regs);
3256         spin_unlock_irqrestore(&vptr->lock, flags);
3257         netif_device_attach(vptr->netdev);
3258 
3259         return 0;
3260 }
3261 #endif  /* CONFIG_PM_SLEEP */
3262 
3263 static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3264 
3265 /*
3266  *      Definition for our device driver. The PCI layer interface
3267  *      uses this to handle all our card discovery and plugging
3268  */
3269 static struct pci_driver velocity_pci_driver = {
3270         .name           = VELOCITY_NAME,
3271         .id_table       = velocity_pci_id_table,
3272         .probe          = velocity_pci_probe,
3273         .remove         = velocity_pci_remove,
3274         .driver = {
3275                 .pm = &velocity_pm_ops,
3276         },
3277 };
3278 
3279 static struct platform_driver velocity_platform_driver = {
3280         .probe          = velocity_platform_probe,
3281         .remove         = velocity_platform_remove,
3282         .driver = {
3283                 .name = "via-velocity",
3284                 .owner = THIS_MODULE,
3285                 .of_match_table = velocity_of_ids,
3286                 .pm = &velocity_pm_ops,
3287         },
3288 };
3289 
3290 /**
3291  *      velocity_ethtool_up     -       pre hook for ethtool
3292  *      @dev: network device
3293  *
3294  *      Called before an ethtool operation. We need to make sure the
3295  *      chip is out of D3 state before we poke at it.
3296  */
3297 static int velocity_ethtool_up(struct net_device *dev)
3298 {
3299         struct velocity_info *vptr = netdev_priv(dev);
3300         if (!netif_running(dev))
3301                 velocity_set_power_state(vptr, PCI_D0);
3302         return 0;
3303 }
3304 
3305 /**
3306  *      velocity_ethtool_down   -       post hook for ethtool
3307  *      @dev: network device
3308  *
3309  *      Called after an ethtool operation. Restore the chip back to D3
3310  *      state if it isn't running.
3311  */
3312 static void velocity_ethtool_down(struct net_device *dev)
3313 {
3314         struct velocity_info *vptr = netdev_priv(dev);
3315         if (!netif_running(dev))
3316                 velocity_set_power_state(vptr, PCI_D3hot);
3317 }
3318 
3319 static int velocity_get_settings(struct net_device *dev,
3320                                  struct ethtool_cmd *cmd)
3321 {
3322         struct velocity_info *vptr = netdev_priv(dev);
3323         struct mac_regs __iomem *regs = vptr->mac_regs;
3324         u32 status;
3325         status = check_connection_type(vptr->mac_regs);
3326 
3327         cmd->supported = SUPPORTED_TP |
3328                         SUPPORTED_Autoneg |
3329                         SUPPORTED_10baseT_Half |
3330                         SUPPORTED_10baseT_Full |
3331                         SUPPORTED_100baseT_Half |
3332                         SUPPORTED_100baseT_Full |
3333                         SUPPORTED_1000baseT_Half |
3334                         SUPPORTED_1000baseT_Full;
3335 
3336         cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3337         if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3338                 cmd->advertising |=
3339                         ADVERTISED_10baseT_Half |
3340                         ADVERTISED_10baseT_Full |
3341                         ADVERTISED_100baseT_Half |
3342                         ADVERTISED_100baseT_Full |
3343                         ADVERTISED_1000baseT_Half |
3344                         ADVERTISED_1000baseT_Full;
3345         } else {
3346                 switch (vptr->options.spd_dpx) {
3347                 case SPD_DPX_1000_FULL:
3348                         cmd->advertising |= ADVERTISED_1000baseT_Full;
3349                         break;
3350                 case SPD_DPX_100_HALF:
3351                         cmd->advertising |= ADVERTISED_100baseT_Half;
3352                         break;
3353                 case SPD_DPX_100_FULL:
3354                         cmd->advertising |= ADVERTISED_100baseT_Full;
3355                         break;
3356                 case SPD_DPX_10_HALF:
3357                         cmd->advertising |= ADVERTISED_10baseT_Half;
3358                         break;
3359                 case SPD_DPX_10_FULL:
3360                         cmd->advertising |= ADVERTISED_10baseT_Full;
3361                         break;
3362                 default:
3363                         break;
3364                 }
3365         }
3366 
3367         if (status & VELOCITY_SPEED_1000)
3368                 ethtool_cmd_speed_set(cmd, SPEED_1000);
3369         else if (status & VELOCITY_SPEED_100)
3370                 ethtool_cmd_speed_set(cmd, SPEED_100);
3371         else
3372                 ethtool_cmd_speed_set(cmd, SPEED_10);
3373 
3374         cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3375         cmd->port = PORT_TP;
3376         cmd->transceiver = XCVR_INTERNAL;
3377         cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3378 
3379         if (status & VELOCITY_DUPLEX_FULL)
3380                 cmd->duplex = DUPLEX_FULL;
3381         else
3382                 cmd->duplex = DUPLEX_HALF;
3383 
3384         return 0;
3385 }
3386 
3387 static int velocity_set_settings(struct net_device *dev,
3388                                  struct ethtool_cmd *cmd)
3389 {
3390         struct velocity_info *vptr = netdev_priv(dev);
3391         u32 speed = ethtool_cmd_speed(cmd);
3392         u32 curr_status;
3393         u32 new_status = 0;
3394         int ret = 0;
3395 
3396         curr_status = check_connection_type(vptr->mac_regs);
3397         curr_status &= (~VELOCITY_LINK_FAIL);
3398 
3399         new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3400         new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3401         new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3402         new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3403         new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3404 
3405         if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3406             (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3407                 ret = -EINVAL;
3408         } else {
3409                 enum speed_opt spd_dpx;
3410 
3411                 if (new_status & VELOCITY_AUTONEG_ENABLE)
3412                         spd_dpx = SPD_DPX_AUTO;
3413                 else if ((new_status & VELOCITY_SPEED_1000) &&
3414                          (new_status & VELOCITY_DUPLEX_FULL)) {
3415                         spd_dpx = SPD_DPX_1000_FULL;
3416                 } else if (new_status & VELOCITY_SPEED_100)
3417                         spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3418                                 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3419                 else if (new_status & VELOCITY_SPEED_10)
3420                         spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3421                                 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3422                 else
3423                         return -EOPNOTSUPP;
3424 
3425                 vptr->options.spd_dpx = spd_dpx;
3426 
3427                 velocity_set_media_mode(vptr, new_status);
3428         }
3429 
3430         return ret;
3431 }
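
/*
 * Example of the mapping above (illustrative): forcing 100 Mbit full
 * duplex with autoneg off yields new_status = VELOCITY_SPEED_100 |
 * VELOCITY_DUPLEX_FULL, which the cascade turns into SPD_DPX_100_FULL
 * before velocity_set_media_mode() reprograms the link.  From userspace
 * this corresponds to something like:
 *
 *      ethtool -s eth0 speed 100 duplex full autoneg off
 */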
3432 
3433 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3434 {
3435         struct velocity_info *vptr = netdev_priv(dev);
3436 
3437         strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3438         strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3439         if (vptr->pdev)
3440                 strlcpy(info->bus_info, pci_name(vptr->pdev),
3441                                                 sizeof(info->bus_info));
3442         else
3443                 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3444 }
3445 
3446 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3447 {
3448         struct velocity_info *vptr = netdev_priv(dev);
3449         wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3450         wol->wolopts |= WAKE_MAGIC;
3451         /*
3452            if (vptr->wol_opts & VELOCITY_WOL_PHY)
3453                    wol.wolopts|=WAKE_PHY;
3454                          */
3455         if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3456                 wol->wolopts |= WAKE_UCAST;
3457         if (vptr->wol_opts & VELOCITY_WOL_ARP)
3458                 wol->wolopts |= WAKE_ARP;
3459         memcpy(&wol->sopass, vptr->wol_passwd, 6);
3460 }
3461 
3462 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3463 {
3464         struct velocity_info *vptr = netdev_priv(dev);
3465 
3466         if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3467                 return -EINVAL;
3468         vptr->wol_opts = VELOCITY_WOL_MAGIC;
3469 
3470         /*
3471            if (wol.wolopts & WAKE_PHY) {
3472            vptr->wol_opts|=VELOCITY_WOL_PHY;
3473            vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3474            }
3475          */
3476 
3477         if (wol->wolopts & WAKE_MAGIC) {
3478                 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3479                 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3480         }
3481         if (wol->wolopts & WAKE_UCAST) {
3482                 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3483                 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3484         }
3485         if (wol->wolopts & WAKE_ARP) {
3486                 vptr->wol_opts |= VELOCITY_WOL_ARP;
3487                 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3488         }
3489         memcpy(vptr->wol_passwd, wol->sopass, 6);
3490         return 0;
3491 }
3492 
3493 static u32 velocity_get_msglevel(struct net_device *dev)
3494 {
3495         return msglevel;
3496 }
3497 
3498 static void velocity_set_msglevel(struct net_device *dev, u32 value)
3499 {
3500         msglevel = value;
3501 }
3502 
3503 static int get_pending_timer_val(int val)
3504 {
3505         int mult_bits = val >> 6;
3506         int mult = 1;
3507 
3508         switch (mult_bits) {
3510         case 1:
3511                 mult = 4; break;
3512         case 2:
3513                 mult = 16; break;
3514         case 3:
3515                 mult = 64; break;
3516         case 0:
3517         default:
3518                 break;
3519         }
3520 
3521         return (val & 0x3f) * mult;
3522 }
3523 
3524 static void set_pending_timer_val(int *val, u32 us)
3525 {
3526         u8 mult = 0;
3527         u8 shift = 0;
3528 
3529         if (us >= 0x3f) {
3530                 mult = 1; /* mult with 4 */
3531                 shift = 2;
3532         }
3533         if (us >= 0x3f * 4) {
3534                 mult = 2; /* mult with 16 */
3535                 shift = 4;
3536         }
3537         if (us >= 0x3f * 16) {
3538                 mult = 3; /* mult with 64 */
3539                 shift = 6;
3540         }
3541 
3542         *val = (mult << 6) | ((us >> shift) & 0x3f);
3543 }
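
/*
 * Worked example of the timer encoding (illustrative): for us = 100,
 * 100 >= 0x3f but < 0x3f * 4, so mult = 1 and shift = 2, giving
 * val = (1 << 6) | (100 >> 2) = 0x59; get_pending_timer_val(0x59)
 * returns 25 * 4 = 100.  The round trip is lossy once the step exceeds
 * one: us = 1000 encodes to (2 << 6) | (1000 >> 4) = 0xbe, which
 * decodes to 62 * 16 = 992.
 */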
3544 
3545 
3546 static int velocity_get_coalesce(struct net_device *dev,
3547                 struct ethtool_coalesce *ecmd)
3548 {
3549         struct velocity_info *vptr = netdev_priv(dev);
3550 
3551         ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3552         ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3553 
3554         ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3555         ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3556 
3557         return 0;
3558 }
3559 
3560 static int velocity_set_coalesce(struct net_device *dev,
3561                 struct ethtool_coalesce *ecmd)
3562 {
3563         struct velocity_info *vptr = netdev_priv(dev);
3564         int max_us = 0x3f * 64;
3565         unsigned long flags;
3566 
3567         /* The timer is a 6-bit count scaled by a 1/4/16/64 multiplier */
3568         if (ecmd->tx_coalesce_usecs > max_us)
3569                 return -EINVAL;
3570         if (ecmd->rx_coalesce_usecs > max_us)
3571                 return -EINVAL;
3572 
3573         if (ecmd->tx_max_coalesced_frames > 0xff)
3574                 return -EINVAL;
3575         if (ecmd->rx_max_coalesced_frames > 0xff)
3576                 return -EINVAL;
3577 
3578         vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3579         vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3580 
3581         set_pending_timer_val(&vptr->options.rxqueue_timer,
3582                         ecmd->rx_coalesce_usecs);
3583         set_pending_timer_val(&vptr->options.txqueue_timer,
3584                         ecmd->tx_coalesce_usecs);
3585 
3586         /* Setup the interrupt suppression and queue timers */
3587         spin_lock_irqsave(&vptr->lock, flags);
3588         mac_disable_int(vptr->mac_regs);
3589         setup_adaptive_interrupts(vptr);
3590         setup_queue_timers(vptr);
3591 
3592         mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3593         mac_clear_isr(vptr->mac_regs);
3594         mac_enable_int(vptr->mac_regs);
3595         spin_unlock_irqrestore(&vptr->lock, flags);
3596 
3597         return 0;
3598 }
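
/*
 * These hooks back the standard coalescing knobs, so from userspace a
 * command like the following (option names mirror the ethtool_coalesce
 * fields used above) should land in rx_intsup / rxqueue_timer:
 *
 *      ethtool -C eth0 rx-usecs 100 rx-frames 16
 */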
3599 
3600 static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3601         "rx_all",
3602         "rx_ok",
3603         "tx_ok",
3604         "rx_error",
3605         "rx_runt_ok",
3606         "rx_runt_err",
3607         "rx_64",
3608         "tx_64",
3609         "rx_65_to_127",
3610         "tx_65_to_127",
3611         "rx_128_to_255",
3612         "tx_128_to_255",
3613         "rx_256_to_511",
3614         "tx_256_to_511",
3615         "rx_512_to_1023",
3616         "tx_512_to_1023",
3617         "rx_1024_to_1518",
3618         "tx_1024_to_1518",
3619         "tx_ether_collisions",
3620         "rx_crc_errors",
3621         "rx_jumbo",
3622         "tx_jumbo",
3623         "rx_mac_control_frames",
3624         "tx_mac_control_frames",
3625         "rx_frame_alignment_errors",
3626         "rx_long_ok",
3627         "rx_long_err",
3628         "tx_sqe_errors",
3629         "rx_no_buf",
3630         "rx_symbol_errors",
3631         "in_range_length_errors",
3632         "late_collisions"
3633 };
3634 
3635 static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3636 {
3637         switch (sset) {
3638         case ETH_SS_STATS:
3639                 memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3640                 break;
3641         }
3642 }
3643 
3644 static int velocity_get_sset_count(struct net_device *dev, int sset)
3645 {
3646         switch (sset) {
3647         case ETH_SS_STATS:
3648                 return ARRAY_SIZE(velocity_gstrings);
3649         default:
3650                 return -EOPNOTSUPP;
3651         }
3652 }
3653 
3654 static void velocity_get_ethtool_stats(struct net_device *dev,
3655                                        struct ethtool_stats *stats, u64 *data)
3656 {
3657         if (netif_running(dev)) {
3658                 struct velocity_info *vptr = netdev_priv(dev);
3659                 u32 *p = vptr->mib_counter;
3660                 int i;
3661 
3662                 spin_lock_irq(&vptr->lock);
3663                 velocity_update_hw_mibs(vptr);
3664                 spin_unlock_irq(&vptr->lock);
3665 
3666                 for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3667                         *data++ = *p++;
3668         }
3669 }
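
/*
 * The strings in velocity_gstrings and the u64 values written here are
 * paired by index; "ethtool -S eth0" prints them as name/value lines.
 * The counters come straight from the hardware MIBs refreshed by
 * velocity_update_hw_mibs() under the lock above.
 */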
3670 
3671 static const struct ethtool_ops velocity_ethtool_ops = {
3672         .get_settings           = velocity_get_settings,
3673         .set_settings           = velocity_set_settings,
3674         .get_drvinfo            = velocity_get_drvinfo,
3675         .get_wol                = velocity_ethtool_get_wol,
3676         .set_wol                = velocity_ethtool_set_wol,
3677         .get_msglevel           = velocity_get_msglevel,
3678         .set_msglevel           = velocity_set_msglevel,
3679         .get_link               = velocity_get_link,
3680         .get_strings            = velocity_get_strings,
3681         .get_sset_count         = velocity_get_sset_count,
3682         .get_ethtool_stats      = velocity_get_ethtool_stats,
3683         .get_coalesce           = velocity_get_coalesce,
3684         .set_coalesce           = velocity_set_coalesce,
3685         .begin                  = velocity_ethtool_up,
3686         .complete               = velocity_ethtool_down
3687 };
3688 
3689 #if defined(CONFIG_PM) && defined(CONFIG_INET)
3690 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3691 {
3692         struct in_ifaddr *ifa = ptr;
3693         struct net_device *dev = ifa->ifa_dev->dev;
3694 
3695         if (dev_net(dev) == &init_net &&
3696             dev->netdev_ops == &velocity_netdev_ops)
3697                 velocity_get_ip(netdev_priv(dev));
3698 
3699         return NOTIFY_DONE;
3700 }
3701 
3702 static struct notifier_block velocity_inetaddr_notifier = {
3703         .notifier_call  = velocity_netdev_event,
3704 };
3705 
3706 static void velocity_register_notifier(void)
3707 {
3708         register_inetaddr_notifier(&velocity_inetaddr_notifier);
3709 }
3710 
3711 static void velocity_unregister_notifier(void)
3712 {
3713         unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3714 }
3715 
3716 #else
3717 
3718 #define velocity_register_notifier()    do {} while (0)
3719 #define velocity_unregister_notifier()  do {} while (0)
3720 
3721 #endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3722 
3723 /**
3724  *      velocity_init_module    -       load time function
3725  *
3726  *      Called when the velocity module is loaded. The PCI and
3727  *      platform drivers are registered with their buses, which in
3728  *      turn call the probe function for each velocity adapter
3729  *      installed in the system.
3730  */
3731 static int __init velocity_init_module(void)
3732 {
3733         int ret_pci, ret_platform;
3734 
3735         velocity_register_notifier();
3736 
3737         ret_pci = pci_register_driver(&velocity_pci_driver);
3738         ret_platform = platform_driver_register(&velocity_platform_driver);
3739 
3740         /* if both registrations failed, remove the notifier */
3741         if ((ret_pci < 0) && (ret_platform < 0)) {
3742                 velocity_unregister_notifier();
3743                 return ret_pci;
3744         }
3745 
3746         return 0;
3747 }
3748 
3749 /**
3750  *      velocity_cleanup_module -       module unload
3751  *
3752  *      Called when the velocity module is unloaded. It cleans up
3753  *      the notifiers and unregisters the PCI and platform driver
3754  *      interfaces for this hardware. This in turn cleans up all
3755  *      discovered interfaces before returning from the function.
3756  */
3757 static void __exit velocity_cleanup_module(void)
3758 {
3759         velocity_unregister_notifier();
3760 
3761         pci_unregister_driver(&velocity_pci_driver);
3762         platform_driver_unregister(&velocity_platform_driver);
3763 }
3764 
3765 module_init(velocity_init_module);
3766 module_exit(velocity_cleanup_module);
3767 
