Linux/drivers/net/ethernet/sun/sunqe.c

/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME        "sunqe"
#define DRV_VERSION     "4.1"
#define DRV_RELDATE     "August 27, 2008"
#define DRV_AUTHOR      "David S. Miller (davem@davemloft.net)"

static char version[] =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
        int tries = QEC_RESET_TRIES;

        sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
        while (--tries) {
                u32 tmp = sbus_readl(gregs + GLOB_CTRL);
                if (tmp & GLOB_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (tries)
                return 0;
        printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
        return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        int tries;

        /* Reset the MACE, then the QEC channel. */
        sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
        tries = MACE_RESET_RETRIES;
        while (--tries) {
                u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
                if (tmp & MREGS_BCONFIG_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
                return -1;
        }

        sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
        tries = QE_RESET_RETRIES;
        while (--tries) {
                u32 tmp = sbus_readl(cregs + CREG_CTRL);
                if (tmp & CREG_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
                return -1;
        }
        return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
        struct qe_init_block *qb = qep->qe_block;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int i;

        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
        memset(qb, 0, sizeof(struct qe_init_block));
        memset(qbufs, 0, sizeof(struct sunqe_buffers));
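        /* Hand every RX descriptor to the chip: point it at its DMA buffer
         * and mark it chip-owned with the full buffer size.  The TX
         * descriptors stay zeroed until qe_start_xmit() fills them in.
         */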
        for (i = 0; i < RX_RING_SIZE; i++) {
                qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
                qb->qe_rxd[i].rx_flags =
                        (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
        }
}

static int qe_init(struct sunqe *qep, int from_irq)
{
        struct sunqec *qecp = qep->parent;
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        void __iomem *gregs = qecp->gregs;
        unsigned char *e = &qep->dev->dev_addr[0];
        u32 tmp;
        int i;

        /* Shut it up. */
        if (qe_stop(qep))
                return -EAGAIN;

        /* Setup initial rx/tx init block pointers. */
        sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
        sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

        /* Enable/mask the various irq's. */
        sbus_writel(0, cregs + CREG_RIMASK);
        sbus_writel(1, cregs + CREG_TIMASK);

        sbus_writel(0, cregs + CREG_QMASK);
        sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

        /* Setup the FIFO pointers into QEC local memory. */
        tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
        sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

        tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
                sbus_readl(gregs + GLOB_RSIZE);
        sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
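        /* With, say, 64K of QEC local memory, this places channel N's RX
         * FIFO at N * 16K and its TX FIFO GLOB_RSIZE bytes above that (see
         * the sizing done in qec_init_once()).
         */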

        /* Clear the channel collision counter. */
        sbus_writel(0, cregs + CREG_CCNT);

        /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
        sbus_writel(0, cregs + CREG_PIPG);

        /* Now dork with the AMD MACE. */
        sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
        sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
        sbus_writeb(0, mregs + MREGS_RXFCNTL);

        /* The QEC dma's the rx'd packets from local memory out to main memory,
         * and therefore it interrupts when the packet reception is "complete".
         * So don't listen for the MACE talking about it.
         */
        sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
        sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
        sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
                     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
                    mregs + MREGS_FCONFIG);

        /* Only usable interface on QuadEther is twisted pair. */
        sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

        /* Tell MACE we are changing the ether address. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
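        /* The MACE presents all six station address bytes through this one
         * register offset, apparently auto-incrementing an internal pointer
         * after each write, hence six writes to the same location.
         */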
        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
        sbus_writeb(e[5], mregs + MREGS_ETHADDR);

        /* Clear out the address filter. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        for (i = 0; i < 8; i++)
                sbus_writeb(0, mregs + MREGS_FILTER);

        /* Address changes are now complete. */
        sbus_writeb(0, mregs + MREGS_IACONFIG);

        qe_init_rings(qep);

        /* Wait a little bit for the link to come up... */
        mdelay(5);
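        /* Unless the link-test function has been disabled, poll link status
         * for up to a quarter second (50 * 5ms) before warning that the
         * link is down.
         */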
        if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
                int tries = 50;

                while (--tries) {
                        u8 tmp;

                        mdelay(5);
                        barrier();
                        tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
                        if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
                                break;
                }
                if (tries == 0)
                        printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
        }

        /* Missed packet counter is cleared on a read. */
        sbus_readb(mregs + MREGS_MPCNT);

        /* Reload multicast information, this will enable the receiver
         * and transmitter.
         */
        qe_set_multicast(qep->dev);

        /* QEC should now start to show interrupts. */
        return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
        struct net_device *dev = qep->dev;
        int mace_hwbug_workaround = 0;

        if (qe_status & CREG_STAT_EDEFER) {
                printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
                dev->stats.tx_errors++;
        }

        if (qe_status & CREG_STAT_CLOSS) {
                printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_carrier_errors++;
        }

        if (qe_status & CREG_STAT_ERETRIES) {
                printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_LCOLL) {
                printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.collisions++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_FUFLOW) {
                printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_JERROR) {
                printk(KERN_ERR "%s: Jabber error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_BERROR) {
                printk(KERN_ERR "%s: Babble error.\n", dev->name);
        }

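        /* Each *COFLOW status bit means an on-chip event counter wrapped;
         * the counters appear to be 8 bits wide, hence the bump of 256
         * events per overflow.
         */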
        if (qe_status & CREG_STAT_CCOFLOW) {
                dev->stats.tx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_TXDERROR) {
                printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXLERR) {
                printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXPERR) {
                printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXSERR) {
                printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RCCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_RUOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_over_errors += 256;
        }

        if (qe_status & CREG_STAT_MCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_missed_errors += 256;
        }

        if (qe_status & CREG_STAT_RXFOFLOW) {
                printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_over_errors++;
        }

        if (qe_status & CREG_STAT_RLCOLL) {
                printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.collisions++;
        }

        if (qe_status & CREG_STAT_FCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_frame_errors += 256;
        }

        if (qe_status & CREG_STAT_CECOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_crc_errors += 256;
        }

        if (qe_status & CREG_STAT_RXDROP) {
                printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;
                dev->stats.rx_missed_errors++;
        }

        if (qe_status & CREG_STAT_RXSMALL) {
                printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_length_errors++;
        }

        if (qe_status & CREG_STAT_RXLERR) {
                printk(KERN_ERR "%s: Receive late error.\n", dev->name);
                dev->stats.rx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXPERR) {
                printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXSERR) {
                printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (mace_hwbug_workaround)
                qe_init(qep, 1);
        return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
        struct net_device *dev = qep->dev;
        struct qe_rxd *this;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int elem = qep->rx_new;
        u32 flags;

        this = &rxbase[elem];
        while (!((flags = this->rx_flags) & RXD_OWN)) {
                struct sk_buff *skb;
                unsigned char *this_qbuf =
                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
                __u32 this_qbuf_dvma = qbufs_dvma +
                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
                struct qe_rxd *end_rxd =
                        &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
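                /* The descriptor table is RX_RING_MAXSIZE entries long but
                 * only RX_RING_SIZE buffers exist, so each buffer is re-armed
                 * RX_RING_SIZE slots ahead of where it was just consumed.
                 */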
                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

                /* Check for errors. */
                if (len < ETH_ZLEN) {
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev->stats.rx_dropped++;
                } else {
                        skb = netdev_alloc_skb(dev, len + 2);
                        if (skb == NULL) {
                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                skb_copy_to_linear_data(skb, this_qbuf, len);
                                skb->protocol = eth_type_trans(skb, qep->dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += len;
                        }
                }
                end_rxd->rx_addr = this_qbuf_dvma;
                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

                elem = NEXT_RX(elem);
                this = &rxbase[elem];
        }
        qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
        struct sunqec *qecp = dev_id;
        u32 qec_status;
        int channel = 0;

        /* Latch the status now. */
        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
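        /* GLOB_STAT packs four status bits per channel; channel 0 owns the
         * low nibble and each loop iteration shifts the next channel's
         * nibble into place.
         */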
        while (channel < 4) {
                if (qec_status & 0xf) {
                        struct sunqe *qep = qecp->qes[channel];
                        u32 qe_status;

                        qe_status = sbus_readl(qep->qcregs + CREG_STAT);
                        if (qe_status & CREG_STAT_ERRORS) {
                                if (qe_is_bolixed(qep, qe_status))
                                        goto next;
                        }
                        if (qe_status & CREG_STAT_RXIRQ)
                                qe_rx(qep);
                        if (netif_queue_stopped(qep->dev) &&
                            (qe_status & CREG_STAT_TXIRQ)) {
                                spin_lock(&qep->lock);
                                qe_tx_reclaim(qep);
                                if (TX_BUFFS_AVAIL(qep) > 0) {
                                        /* Wake net queue and return to
                                         * lazy tx reclaim.
                                         */
                                        netif_wake_queue(qep->dev);
                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
                                }
                                spin_unlock(&qep->lock);
                        }
        next:
                        ;
                }
                qec_status >>= 4;
                channel++;
        }

        return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qep->mconfig = (MREGS_MCONFIG_TXENAB |
                        MREGS_MCONFIG_RXENAB |
                        MREGS_MCONFIG_MBAENAB);
        return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qe_stop(qep);
        return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
        int elem = qep->tx_old;

        while (elem != qep->tx_new) {
                u32 flags = txbase[elem].tx_flags;

                if (flags & TXD_OWN)
                        break;
                elem = NEXT_TX(elem);
        }
        qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        int tx_full;

        spin_lock_irq(&qep->lock);

        /* Try to reclaim, if that frees up some tx
         * entries, we're fine.
         */
        qe_tx_reclaim(qep);
        tx_full = TX_BUFFS_AVAIL(qep) <= 0;

        spin_unlock_irq(&qep->lock);

        if (!tx_full)
                goto out;

        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
        qe_init(qep, 1);

out:
        netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;

        spin_lock_irq(&qep->lock);

        qe_tx_reclaim(qep);

        len = skb->len;
        entry = qep->tx_new;

        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
        txbuf_dvma = qbufs_dvma +
                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

        /* Avoid a race... */
        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
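        /* The placeholder write above keeps TXD_OWN clear while the buffer
         * is filled in, so the chip never sees a half-built descriptor;
         * ownership transfers only with the final tx_flags store below.
         */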

        skb_copy_from_linear_data(skb, txbuf, len);

        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
        qep->tx_new = NEXT_TX(entry);

        /* Get it going. */
        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;

        if (TX_BUFFS_AVAIL(qep) <= 0) {
                /* Halt the net queue and enable tx interrupts.
                 * When the tx queue empties the tx irq handler
                 * will wake up the queue and return us back to
                 * the lazy tx reclaim scheme.
                 */
                netif_stop_queue(dev);
                sbus_writel(0, qep->qcregs + CREG_TIMASK);
        }
        spin_unlock_irq(&qep->lock);

        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        u8 new_mconfig = qep->mconfig;
        int i;
        u32 crc;

        /* Lock out others. */
        netif_stop_queue(dev);

        if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
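                /* Accept all multicast: load the logical address filter
                 * with all ones.
                 */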
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++)
                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        } else if (dev->flags & IFF_PROMISC) {
                new_mconfig |= MREGS_MCONFIG_PROMISC;
        } else {
                u16 hash_table[4];
                u8 *hbytes = (unsigned char *) &hash_table[0];

                memset(hash_table, 0, sizeof(hash_table));
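                /* The 64-bit logical address filter is indexed by the top
                 * six bits of the little-endian CRC of each multicast
                 * address, kept here as four 16-bit words.
                 */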
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
                /* Program the qe with the new filter value. */
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++) {
                        u8 tmp = *hbytes++;
                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
                }
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        }

        /* Any change of the logical address filter, the physical address,
         * or enabling/disabling promiscuous mode causes the MACE to disable
         * the receiver.  So we must re-enable them here or else the MACE
         * refuses to listen to anything on the network.  Sheesh, took
         * me a day or two to find this bug.
         */
        qep->mconfig = new_mconfig;
        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

        /* Let us get going again. */
        netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        const struct linux_prom_registers *regs;
        struct sunqe *qep = netdev_priv(dev);
        struct platform_device *op;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));

        op = qep->op;
        regs = of_get_property(op->dev.of_node, "reg", NULL);
        if (regs)
                snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
                         regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        void __iomem *mregs = qep->mregs;
        u8 phyconfig;

        spin_lock_irq(&qep->lock);
        phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
        spin_unlock_irq(&qep->lock);

        return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
        .get_drvinfo            = qe_get_drvinfo,
        .get_link               = qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
        u8 bsizes = qecp->qec_bursts;

        if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
        } else if (bsizes & DMA_BURST32) {
                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
        } else {
                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
        }

        /* The packet size register only matters in 100baseT BigMAC
         * configurations; program a sane default here just to be on the
         * safe side.
         */
        sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

        /* Set the local memsize register, divided up to one piece per QE channel. */
        sbus_writel((resource_size(&op->resource[1]) >> 2),
                    qecp->gregs + GLOB_MSIZE);

        /* Divide up the local QEC memory amongst the 4 QE receiver and
         * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
         */
        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                    qecp->gregs + GLOB_TSIZE);
        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                    qecp->gregs + GLOB_RSIZE);
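        /* For example, 64K of local memory yields MSIZE = 16K per channel,
         * with TSIZE = RSIZE = 8K for each channel's TX and RX FIFO.
         */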
}

static u8 qec_get_burst(struct device_node *dp)
{
        u8 bsizes, bsizes_more;

        /* Find and set the burst sizes for the QEC, since it
         * does the actual dma for all 4 channels.
         */
        bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
        bsizes &= 0xff;
        bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

        if (bsizes_more != 0xff)
                bsizes &= bsizes_more;
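        /* If the properties gave us nothing usable, fall back to every
         * burst size below 32 bytes (DMA_BURST32 - 1 presumably sets all
         * of the lower burst-size bits).
         */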
        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
            (bsizes & DMA_BURST32) == 0)
                bsizes = (DMA_BURST32 - 1);

        return bsizes;
}

static struct sunqec *get_qec(struct platform_device *child)
{
        struct platform_device *op = to_platform_device(child->dev.parent);
        struct sunqec *qecp;

        qecp = platform_get_drvdata(op);
        if (!qecp) {
                qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
                if (qecp) {
                        u32 ctrl;

                        qecp->op = op;
                        qecp->gregs = of_ioremap(&op->resource[0], 0,
                                                 GLOB_REG_SIZE,
                                                 "QEC Global Registers");
                        if (!qecp->gregs)
                                goto fail;

                        /* Make sure the QEC is in MACE mode. */
                        ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
                        ctrl &= 0xf0000000;
                        if (ctrl != GLOB_CTRL_MMODE) {
                                printk(KERN_ERR "qec: Not in MACE mode!\n");
                                goto fail;
                        }

                        if (qec_global_reset(qecp->gregs))
                                goto fail;

                        qecp->qec_bursts = qec_get_burst(op->dev.of_node);

                        qec_init_once(qecp, op);

                        if (request_irq(op->archdata.irqs[0], qec_interrupt,
                                        IRQF_SHARED, "qec", (void *) qecp)) {
                                printk(KERN_ERR "qec: Can't register irq.\n");
                                goto fail;
                        }

                        platform_set_drvdata(op, qecp);

                        qecp->next_module = root_qec_dev;
                        root_qec_dev = qecp;
                }
        }

        return qecp;

fail:
        if (qecp->gregs)
                of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
        kfree(qecp);
        return NULL;
}

static const struct net_device_ops qec_ops = {
        .ndo_open               = qe_open,
        .ndo_stop               = qe_close,
        .ndo_start_xmit         = qe_start_xmit,
        .ndo_set_rx_mode        = qe_set_multicast,
        .ndo_tx_timeout         = qe_tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
        static unsigned version_printed;
        struct net_device *dev;
        struct sunqec *qecp;
        struct sunqe *qe;
        int i, res;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        dev = alloc_etherdev(sizeof(struct sunqe));
        if (!dev)
                return -ENOMEM;

        memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

        qe = netdev_priv(dev);

        res = -ENODEV;

        i = of_getintprop_default(op->dev.of_node, "channel#", -1);
        if (i == -1)
                goto fail;
        qe->channel = i;
        spin_lock_init(&qe->lock);

        qecp = get_qec(op);
        if (!qecp)
                goto fail;

        qecp->qes[qe->channel] = qe;
        qe->dev = dev;
        qe->parent = qecp;
        qe->op = op;

        res = -ENOMEM;
        qe->qcregs = of_ioremap(&op->resource[0], 0,
                                CREG_REG_SIZE, "QEC Channel Registers");
        if (!qe->qcregs) {
                printk(KERN_ERR "qe: Cannot map channel registers.\n");
                goto fail;
        }

        qe->mregs = of_ioremap(&op->resource[1], 0,
                               MREGS_REG_SIZE, "QE MACE Registers");
        if (!qe->mregs) {
                printk(KERN_ERR "qe: Cannot map MACE registers.\n");
                goto fail;
        }

        qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
                                          &qe->qblock_dvma, GFP_ATOMIC);
        qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
                                         &qe->buffers_dvma, GFP_ATOMIC);
        if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
            qe->buffers == NULL || qe->buffers_dvma == 0)
                goto fail;

        /* Stop this QE. */
        qe_stop(qe);

        SET_NETDEV_DEV(dev, &op->dev);

        dev->watchdog_timeo = 5*HZ;
        dev->irq = op->archdata.irqs[0];
        dev->dma = 0;
        dev->ethtool_ops = &qe_ethtool_ops;
        dev->netdev_ops = &qec_ops;

        res = register_netdev(dev);
        if (res)
                goto fail;

        platform_set_drvdata(op, qe);

        printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
               dev->dev_addr);
        return 0;

fail:
        if (qe->qcregs)
                of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
        if (qe->mregs)
                of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
        if (qe->qe_block)
                dma_free_coherent(&op->dev, PAGE_SIZE,
                                  qe->qe_block, qe->qblock_dvma);
        if (qe->buffers)
                dma_free_coherent(&op->dev,
                                  sizeof(struct sunqe_buffers),
                                  qe->buffers,
                                  qe->buffers_dvma);

        free_netdev(dev);

        return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
        return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
        struct sunqe *qp = platform_get_drvdata(op);
        struct net_device *net_dev = qp->dev;

        unregister_netdev(net_dev);

        of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
        of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
        dma_free_coherent(&op->dev, PAGE_SIZE,
                          qp->qe_block, qp->qblock_dvma);
        dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
                          qp->buffers, qp->buffers_dvma);

        free_netdev(net_dev);

        return 0;
}

static const struct of_device_id qec_sbus_match[] = {
        {
                .name = "qe",
        },
        {},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
        .driver = {
                .name = "qec",
                .owner = THIS_MODULE,
                .of_match_table = qec_sbus_match,
        },
        .probe          = qec_sbus_probe,
        .remove         = qec_sbus_remove,
};

static int __init qec_init(void)
{
        return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
        platform_driver_unregister(&qec_sbus_driver);

        while (root_qec_dev) {
                struct sunqec *next = root_qec_dev->next_module;
                struct platform_device *op = root_qec_dev->op;

                free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
                of_iounmap(&op->resource[0], root_qec_dev->gregs,
                           GLOB_REG_SIZE);
                kfree(root_qec_dev);

                root_qec_dev = next;
        }
}

module_init(qec_init);
module_exit(qec_exit);
