
Linux/drivers/net/ethernet/sun/sunqe.c

/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME        "sunqe"
#define DRV_VERSION     "4.1"
#define DRV_RELDATE     "August 27, 2008"
#define DRV_AUTHOR      "David S. Miller (davem@davemloft.net)"

static char version[] =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

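/* Poll-with-timeout reset helper: write the reset bit, then spin until the
 * chip clears it.  At 20us per retry and QEC_RESET_TRIES (200) retries, the
 * worst case is roughly 4ms of waiting before we give up.
 */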
static inline int qec_global_reset(void __iomem *gregs)
{
        int tries = QEC_RESET_TRIES;

        sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
        while (--tries) {
                u32 tmp = sbus_readl(gregs + GLOB_CTRL);
                if (tmp & GLOB_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (tries)
                return 0;
        printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
        return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        int tries;

        /* Reset the MACE, then the QEC channel. */
        sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
        tries = MACE_RESET_RETRIES;
        while (--tries) {
                u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
                if (tmp & MREGS_BCONFIG_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
                return -1;
        }

        sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
        tries = QE_RESET_RETRIES;
        while (--tries) {
                u32 tmp = sbus_readl(cregs + CREG_CTRL);
                if (tmp & CREG_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
                return -1;
        }
        return 0;
}

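/* Ring setup: every RX descriptor is handed to the chip (RXD_OWN) with a
 * full-sized buffer, all carved out of the single coherent sunqe_buffers
 * area and addressed by DVMA offset.  The TX ring is left zeroed here;
 * qe_start_xmit() fills entries as packets are queued.
 */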
static void qe_init_rings(struct sunqe *qep)
{
        struct qe_init_block *qb = qep->qe_block;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
        int i;

        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
        memset(qb, 0, sizeof(struct qe_init_block));
        memset(qbufs, 0, sizeof(struct sunqe_buffers));
        for (i = 0; i < RX_RING_SIZE; i++) {
                qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
                qb->qe_rxd[i].rx_flags =
                        (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
        }
}

static int qe_init(struct sunqe *qep, int from_irq)
{
        struct sunqec *qecp = qep->parent;
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        void __iomem *gregs = qecp->gregs;
        unsigned char *e = &qep->dev->dev_addr[0];
        __u32 qblk_dvma = (__u32)qep->qblock_dvma;
        u32 tmp;
        int i;

        /* Shut it up. */
        if (qe_stop(qep))
                return -EAGAIN;

        /* Setup initial rx/tx init block pointers. */
        sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
        sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

        /* Enable/mask the various irq's. */
        sbus_writel(0, cregs + CREG_RIMASK);
        sbus_writel(1, cregs + CREG_TIMASK);

        sbus_writel(0, cregs + CREG_QMASK);
        sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

        /* Setup the FIFO pointers into QEC local memory. */
        tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
        sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

        tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
                sbus_readl(gregs + GLOB_RSIZE);
        sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
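        /* Each channel thus owns an MSIZE-byte slice of QEC local memory:
         * the receive FIFO occupies the first RSIZE bytes of the slice and
         * the transmit FIFO starts right after it.  Read and write pointers
         * begin equal, presumably denoting an empty FIFO.
         */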

        /* Clear the channel collision counter. */
        sbus_writel(0, cregs + CREG_CCNT);

        /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
        sbus_writel(0, cregs + CREG_PIPG);

        /* Now dork with the AMD MACE. */
        sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
        sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
        sbus_writeb(0, mregs + MREGS_RXFCNTL);

        /* The QEC dma's the rx'd packets from local memory out to main memory,
         * and therefore it interrupts when the packet reception is "complete".
         * So don't listen for the MACE talking about it.
         */
        sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
        sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
        sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
                     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
                    mregs + MREGS_FCONFIG);

        /* Only usable interface on QuadEther is twisted pair. */
        sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

        /* Tell MACE we are changing the ether address. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
        sbus_writeb(e[5], mregs + MREGS_ETHADDR);

        /* Clear out the address filter. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        for (i = 0; i < 8; i++)
                sbus_writeb(0, mregs + MREGS_FILTER);

        /* Address changes are now complete. */
        sbus_writeb(0, mregs + MREGS_IACONFIG);

        qe_init_rings(qep);

        /* Wait a little bit for the link to come up... */
        mdelay(5);
        if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
                int tries = 50;

                while (--tries) {
                        u8 tmp;

                        mdelay(5);
                        barrier();
                        tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
                        if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
                                break;
                }
                if (tries == 0)
                        printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
        }

        /* Missed packet counter is cleared on a read. */
        sbus_readb(mregs + MREGS_MPCNT);

        /* Reload multicast information, this will enable the receiver
         * and transmitter.
         */
        qe_set_multicast(qep->dev);

        /* QEC should now start to show interrupts. */
        return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
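/* The "+= 256" cases below handle counter-overflow status bits: the chip
 * keeps (apparently 8-bit) event counters, so each overflow interrupt means
 * another 256 events of that type have occurred.
 */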
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
        struct net_device *dev = qep->dev;
        int mace_hwbug_workaround = 0;

        if (qe_status & CREG_STAT_EDEFER) {
                printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
                dev->stats.tx_errors++;
        }

        if (qe_status & CREG_STAT_CLOSS) {
                printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_carrier_errors++;
        }

        if (qe_status & CREG_STAT_ERETRIES) {
                printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_LCOLL) {
                printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.collisions++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_FUFLOW) {
                printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_JERROR) {
                printk(KERN_ERR "%s: Jabber error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_BERROR) {
                printk(KERN_ERR "%s: Babble error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_CCOFLOW) {
                dev->stats.tx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_TXDERROR) {
                printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXLERR) {
                printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXPERR) {
                printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXSERR) {
                printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RCCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_RUOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_over_errors += 256;
        }

        if (qe_status & CREG_STAT_MCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_missed_errors += 256;
        }

        if (qe_status & CREG_STAT_RXFOFLOW) {
                printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_over_errors++;
        }

        if (qe_status & CREG_STAT_RLCOLL) {
                printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.collisions++;
        }

        if (qe_status & CREG_STAT_FCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_frame_errors += 256;
        }

        if (qe_status & CREG_STAT_CECOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_crc_errors += 256;
        }

        if (qe_status & CREG_STAT_RXDROP) {
                printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;
                dev->stats.rx_missed_errors++;
        }

        if (qe_status & CREG_STAT_RXSMALL) {
                printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_length_errors++;
        }

        if (qe_status & CREG_STAT_RXLERR) {
                printk(KERN_ERR "%s: Receive late error.\n", dev->name);
                dev->stats.rx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXPERR) {
                printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXSERR) {
                printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (mace_hwbug_workaround)
                qe_init(qep, 1);
        return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
        struct net_device *dev = qep->dev;
        struct qe_rxd *this;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
        int elem = qep->rx_new;
        u32 flags;

        this = &rxbase[elem];
        while (!((flags = this->rx_flags) & RXD_OWN)) {
                struct sk_buff *skb;
                unsigned char *this_qbuf =
                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
                __u32 this_qbuf_dvma = qbufs_dvma +
                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
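                /* Recycle the descriptor RX_RING_SIZE slots ahead of this
                 * one (modulo RX_RING_MAXSIZE), so a full window of
                 * RX_RING_SIZE buffers stays posted to the chip.
                 */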
                struct qe_rxd *end_rxd =
                        &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

                /* Check for errors. */
                if (len < ETH_ZLEN) {
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev->stats.rx_dropped++;
                } else {
                        skb = netdev_alloc_skb(dev, len + 2);
                        if (skb == NULL) {
                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                skb_copy_to_linear_data(skb, this_qbuf, len);
                                skb->protocol = eth_type_trans(skb, qep->dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += len;
                        }
                }
                end_rxd->rx_addr = this_qbuf_dvma;
                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

                elem = NEXT_RX(elem);
                this = &rxbase[elem];
        }
        qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
        struct sunqec *qecp = dev_id;
        u32 qec_status;
        int channel = 0;

        /* Latch the status now. */
        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
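        /* GLOB_STAT packs four status bits per QE channel, channel 0 in the
         * low nibble: test a nibble, service that channel, then shift right
         * by four for the next one.
         */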
        while (channel < 4) {
                if (qec_status & 0xf) {
                        struct sunqe *qep = qecp->qes[channel];
                        u32 qe_status;

                        qe_status = sbus_readl(qep->qcregs + CREG_STAT);
                        if (qe_status & CREG_STAT_ERRORS) {
                                if (qe_is_bolixed(qep, qe_status))
                                        goto next;
                        }
                        if (qe_status & CREG_STAT_RXIRQ)
                                qe_rx(qep);
                        if (netif_queue_stopped(qep->dev) &&
                            (qe_status & CREG_STAT_TXIRQ)) {
                                spin_lock(&qep->lock);
                                qe_tx_reclaim(qep);
                                if (TX_BUFFS_AVAIL(qep) > 0) {
                                        /* Wake net queue and return to
                                         * lazy tx reclaim.
                                         */
                                        netif_wake_queue(qep->dev);
                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
                                }
                                spin_unlock(&qep->lock);
                        }
        next:
                        ;
                }
                qec_status >>= 4;
                channel++;
        }

        return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qep->mconfig = (MREGS_MCONFIG_TXENAB |
                        MREGS_MCONFIG_RXENAB |
                        MREGS_MCONFIG_MBAENAB);
        return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qe_stop(qep);
        return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
        int elem = qep->tx_old;

        while (elem != qep->tx_new) {
                u32 flags = txbase[elem].tx_flags;

                if (flags & TXD_OWN)
                        break;
                elem = NEXT_TX(elem);
        }
        qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        int tx_full;

        spin_lock_irq(&qep->lock);

        /* Try to reclaim, if that frees up some tx
         * entries, we're fine.
         */
        qe_tx_reclaim(qep);
        tx_full = TX_BUFFS_AVAIL(qep) <= 0;

        spin_unlock_irq(&qep->lock);

        if (!tx_full)
                goto out;

        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
        qe_init(qep, 1);

out:
        netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
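/* Note that this is a copy-based transmit path: each skb is copied into a
 * preallocated coherent DVMA staging buffer rather than being DMA-mapped
 * directly, so the skb can be freed as soon as the copy and the TWAKEUP
 * doorbell write below are done.
 */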
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;

        spin_lock_irq(&qep->lock);

        qe_tx_reclaim(qep);

        len = skb->len;
        entry = qep->tx_new;

        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
        txbuf_dvma = qbufs_dvma +
                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

        /* Avoid a race... */
        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

        skb_copy_from_linear_data(skb, txbuf, len);

        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
        qep->tx_new = NEXT_TX(entry);

        /* Get it going. */
        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;

        if (TX_BUFFS_AVAIL(qep) <= 0) {
                /* Halt the net queue and enable tx interrupts.
                 * When the tx queue empties the tx irq handler
                 * will wake up the queue and return us back to
                 * the lazy tx reclaim scheme.
                 */
                netif_stop_queue(dev);
                sbus_writel(0, qep->qcregs + CREG_TIMASK);
        }
        spin_unlock_irq(&qep->lock);

        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        u8 new_mconfig = qep->mconfig;
        int i;
        u32 crc;

        /* Lock out others. */
        netif_stop_queue(dev);

        if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++)
                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        } else if (dev->flags & IFF_PROMISC) {
                new_mconfig |= MREGS_MCONFIG_PROMISC;
        } else {
                u16 hash_table[4];
                u8 *hbytes = (unsigned char *) &hash_table[0];

                memset(hash_table, 0, sizeof(hash_table));
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
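                /* The upper six bits of the little-endian CRC pick one of
                 * the 64 logical-address filter bits: after the shift,
                 * bits 5:4 select one of the four 16-bit words and bits 3:0
                 * the bit within that word.
                 */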
                /* Program the qe with the new filter value. */
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++) {
                        u8 tmp = *hbytes++;
                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
                }
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        }

        /* Any change of the logical address filter, the physical address,
         * or enabling/disabling promiscuous mode causes the MACE to disable
         * the receiver.  So we must re-enable them here or else the MACE
         * refuses to listen to anything on the network.  Sheesh, took
         * me a day or two to find this bug.
         */
        qep->mconfig = new_mconfig;
        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

        /* Let us get going again. */
        netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        const struct linux_prom_registers *regs;
        struct sunqe *qep = netdev_priv(dev);
        struct platform_device *op;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));

        op = qep->op;
        regs = of_get_property(op->dev.of_node, "reg", NULL);
        if (regs)
                snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
                         regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        void __iomem *mregs = qep->mregs;
        u8 phyconfig;

        spin_lock_irq(&qep->lock);
        phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
        spin_unlock_irq(&qep->lock);

        return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
        .get_drvinfo            = qe_get_drvinfo,
        .get_link               = qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
        u8 bsizes = qecp->qec_bursts;

        if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
        } else if (bsizes & DMA_BURST32) {
                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
        } else {
                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
        }

        /* Packetsize is only used in 100baseT BigMAC configurations;
         * set it to a fixed 2048 just to be on the safe side.
         */
        sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

        /* Set the local memsize register, divided up to one piece per QE channel. */
        sbus_writel((resource_size(&op->resource[1]) >> 2),
                    qecp->gregs + GLOB_MSIZE);

        /* Divide up the local QEC memory amongst the 4 QE receiver and
         * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
         */
        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                    qecp->gregs + GLOB_TSIZE);
        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                    qecp->gregs + GLOB_RSIZE);
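        /* Worked example (size purely illustrative): with 256K of local
         * buffer memory in resource[1], MSIZE is 256K >> 2 = 64K per
         * channel, and TSIZE/RSIZE are each 64K >> 1 = 32K, i.e. half of a
         * channel's slice for transmit and half for receive.
         */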
}

static u8 qec_get_burst(struct device_node *dp)
{
        u8 bsizes, bsizes_more;

        /* Find and set the burst sizes for the QEC, since it
         * does the actual dma for all 4 channels.
         */
        bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
        bsizes &= 0xff;
        bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

        if (bsizes_more != 0xff)
                bsizes &= bsizes_more;
        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
            (bsizes & DMA_BURST32) == 0)
                bsizes = (DMA_BURST32 - 1);
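        /* i.e. when the properties are missing or exclude 16/32-byte
         * bursts, fall back to a conservative default; assuming the
         * DMA_BURSTn constants are one-hot bits, DMA_BURST32 - 1 selects
         * every burst size below 32 bytes.
         */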

        return bsizes;
}

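/* Lazily set up the shared QEC parent state on the first child probe; it is
 * cached in the parent platform device's drvdata so the other channels find
 * it here instead of remapping and resetting the QEC again.
 */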
static struct sunqec *get_qec(struct platform_device *child)
{
        struct platform_device *op = to_platform_device(child->dev.parent);
        struct sunqec *qecp;

        qecp = platform_get_drvdata(op);
        if (!qecp) {
                qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
                if (qecp) {
                        u32 ctrl;

                        qecp->op = op;
                        qecp->gregs = of_ioremap(&op->resource[0], 0,
                                                 GLOB_REG_SIZE,
                                                 "QEC Global Registers");
                        if (!qecp->gregs)
                                goto fail;

                        /* Make sure the QEC is in MACE mode. */
                        ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
                        ctrl &= 0xf0000000;
                        if (ctrl != GLOB_CTRL_MMODE) {
                                printk(KERN_ERR "qec: Not in MACE mode!\n");
                                goto fail;
                        }

                        if (qec_global_reset(qecp->gregs))
                                goto fail;

                        qecp->qec_bursts = qec_get_burst(op->dev.of_node);

                        qec_init_once(qecp, op);

                        if (request_irq(op->archdata.irqs[0], qec_interrupt,
                                        IRQF_SHARED, "qec", (void *) qecp)) {
                                printk(KERN_ERR "qec: Can't register irq.\n");
                                goto fail;
                        }

                        platform_set_drvdata(op, qecp);

                        qecp->next_module = root_qec_dev;
                        root_qec_dev = qecp;
                }
        }

        return qecp;

fail:
        if (qecp->gregs)
                of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
        kfree(qecp);
        return NULL;
}

static const struct net_device_ops qec_ops = {
        .ndo_open               = qe_open,
        .ndo_stop               = qe_close,
        .ndo_start_xmit         = qe_start_xmit,
        .ndo_set_rx_mode        = qe_set_multicast,
        .ndo_tx_timeout         = qe_tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
        static unsigned version_printed;
        struct net_device *dev;
        struct sunqec *qecp;
        struct sunqe *qe;
        int i, res;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        dev = alloc_etherdev(sizeof(struct sunqe));
        if (!dev)
                return -ENOMEM;

        memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

        qe = netdev_priv(dev);

        res = -ENODEV;

        i = of_getintprop_default(op->dev.of_node, "channel#", -1);
        if (i == -1)
                goto fail;
        qe->channel = i;
        spin_lock_init(&qe->lock);

        qecp = get_qec(op);
        if (!qecp)
                goto fail;

        qecp->qes[qe->channel] = qe;
        qe->dev = dev;
        qe->parent = qecp;
        qe->op = op;

        res = -ENOMEM;
        qe->qcregs = of_ioremap(&op->resource[0], 0,
                                CREG_REG_SIZE, "QEC Channel Registers");
        if (!qe->qcregs) {
                printk(KERN_ERR "qe: Cannot map channel registers.\n");
                goto fail;
        }

        qe->mregs = of_ioremap(&op->resource[1], 0,
                               MREGS_REG_SIZE, "QE MACE Registers");
        if (!qe->mregs) {
                printk(KERN_ERR "qe: Cannot map MACE registers.\n");
                goto fail;
        }

        qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
                                          &qe->qblock_dvma, GFP_ATOMIC);
        qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
                                         &qe->buffers_dvma, GFP_ATOMIC);
        if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
            qe->buffers == NULL || qe->buffers_dvma == 0)
                goto fail;

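        /* Both the descriptor block and the packet staging buffers come
         * from dma_alloc_coherent(), so the CPU and the QEC always see the
         * same bytes and no per-packet streaming-DMA sync calls are needed
         * in the fast paths above.
         */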
        /* Stop this QE. */
        qe_stop(qe);

        SET_NETDEV_DEV(dev, &op->dev);

        dev->watchdog_timeo = 5*HZ;
        dev->irq = op->archdata.irqs[0];
        dev->dma = 0;
        dev->ethtool_ops = &qe_ethtool_ops;
        dev->netdev_ops = &qec_ops;

        res = register_netdev(dev);
        if (res)
                goto fail;

        platform_set_drvdata(op, qe);

        printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
               dev->dev_addr);
        return 0;

fail:
        if (qe->qcregs)
                of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
        if (qe->mregs)
                of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
        if (qe->qe_block)
                dma_free_coherent(&op->dev, PAGE_SIZE,
                                  qe->qe_block, qe->qblock_dvma);
        if (qe->buffers)
                dma_free_coherent(&op->dev,
                                  sizeof(struct sunqe_buffers),
                                  qe->buffers,
                                  qe->buffers_dvma);

        free_netdev(dev);

        return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
        return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
        struct sunqe *qp = platform_get_drvdata(op);
        struct net_device *net_dev = qp->dev;

        unregister_netdev(net_dev);

        of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
        of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
        dma_free_coherent(&op->dev, PAGE_SIZE,
                          qp->qe_block, qp->qblock_dvma);
        dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
                          qp->buffers, qp->buffers_dvma);

        free_netdev(net_dev);

        return 0;
}

static const struct of_device_id qec_sbus_match[] = {
        {
                .name = "qe",
        },
        {},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
        .driver = {
                .name = "qec",
                .of_match_table = qec_sbus_match,
        },
        .probe          = qec_sbus_probe,
        .remove         = qec_sbus_remove,
};

static int __init qec_init(void)
{
        return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
        platform_driver_unregister(&qec_sbus_driver);

        while (root_qec_dev) {
                struct sunqec *next = root_qec_dev->next_module;
                struct platform_device *op = root_qec_dev->op;

                free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
                of_iounmap(&op->resource[0], root_qec_dev->gregs,
                           GLOB_REG_SIZE);
                kfree(root_qec_dev);

                root_qec_dev = next;
        }
}

module_init(qec_init);
module_exit(qec_exit);
