Linux/drivers/net/ethernet/seeq/sgiseeq.c

/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me.  So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective.  Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to.  You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
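
/* These wrap ring indices around with a simple mask, which only works
 * because SEEQ_RX_BUFFERS and SEEQ_TX_BUFFERS are powers of two; e.g.
 * with 16 buffers, NEXT_RX(15) == (16 & 15) == 0.
 */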

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
                            sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                            sp->tx_old - sp->tx_new - 1)
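
/* Worked example: with SEEQ_TX_BUFFERS == 16, tx_old == 3 and
 * tx_new == 7, 3 + 15 - 7 == 11 slots remain free.  One slot is always
 * held back so that tx_old == tx_new can only mean "ring empty",
 * never "ring full".
 */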

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
                                  (dma_addr_t)((unsigned long)(v) -            \
                                               (unsigned long)((sp)->rx_desc)))
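
/* Translate a CPU pointer into the descriptor block into the bus
 * address the HPC must be given: the pointer's byte offset from the
 * first rx descriptor (the start of the block) added to the DMA
 * handle srings_dma.
 */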

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
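
/* In sgiseeq_rx() below, frames no longer than rx_copybreak are copied
 * into a freshly allocated small skb and the original full-sized
 * buffer stays in the ring; longer frames are passed up whole and
 * replaced with a new PKT_BUF_SZ buffer.
 */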

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
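
/* The padding rounds each descriptor below up to exactly 128 bytes,
 * keeping the 8-byte-aligned HPC DMA descriptors well apart; that
 * matters because the rings live in noncoherent memory and are synced
 * to and from the device one descriptor at a time.
 */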

struct sgiseeq_rx_desc {
        volatile struct hpc_dma_desc rdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
        volatile struct hpc_dma_desc tdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
        struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
        struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
        struct sgiseeq_init_block *srings;
        dma_addr_t srings_dma;

        /* Ptrs to the descriptors in uncached space. */
        struct sgiseeq_rx_desc *rx_desc;
        struct sgiseeq_tx_desc *tx_desc;

        char *name;
        struct hpc3_ethregs *hregs;
        struct sgiseeq_regs *sregs;

        /* Ring entry counters. */
        unsigned int rx_new, tx_new;
        unsigned int rx_old, tx_old;

        int is_edlc;
        unsigned char control;
        unsigned char mode;

        spinlock_t tx_lock;
};
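
/* Convention for the counters above: rx_new is the next descriptor to
 * check for a completed receive, tx_new is the next free transmit
 * slot, and tx_old trails it marking the oldest transmit not yet
 * reclaimed.  tx_lock serializes the transmit path against the
 * interrupt handler.
 */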

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_TO_DEVICE);
}
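
/* Both helpers sync exactly one descriptor.  Passing the rx descriptor
 * size even for tx descriptors is safe: thanks to PAD_SIZE, the two
 * descriptor structures are the same size.
 */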

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
        hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
                                       struct sgiseeq_regs *sregs)
{
        hregs->rx_ctrl = hregs->tx_ctrl = 0;
        hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
                       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs,
                           struct sgiseeq_regs *sregs)
{
        sregs->rstat = sp->mode | RSTAT_GO_BITS;
        hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int i;

        sregs->tstat = SEEQ_TCMD_RB0;
        for (i = 0; i < 6; i++)
                sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        spin_lock_irq(&sp->tx_lock);
        __sgiseeq_set_mac_address(dev);
        spin_unlock_irq(&sp->tx_lock);

        return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
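
/* HPCDMA_OWN hands a receive descriptor to the HPC, which clears it
 * once the buffer is filled; that cleared bit is what the receive loop
 * polls for.  The BCNT field starts at the full buffer size, so the
 * count left over after DMA reveals the frame length.
 */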

static int seeq_init_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        netif_stop_queue(dev);
        sp->rx_new = sp->tx_new = 0;
        sp->rx_old = sp->tx_old = 0;

        __sgiseeq_set_mac_address(dev);

        /* Setup tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->tx_desc[i]);
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (!sp->rx_desc[i].skb) {
                        dma_addr_t dma_addr;
                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

                        if (skb == NULL)
                                return -ENOMEM;
                        skb_reserve(skb, 2);
                        dma_addr = dma_map_single(dev->dev.parent,
                                                  skb->data - 2,
                                                  PKT_BUF_SZ, DMA_FROM_DEVICE);
                        sp->rx_desc[i].skb = skb;
                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->rx_desc[i]);
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
        return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        /* clear tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                if (sp->tx_desc[i].skb) {
                        dev_kfree_skb(sp->tx_desc[i].skb);
                        sp->tx_desc[i].skb = NULL;
                }
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (sp->rx_desc[i].skb) {
                        dev_kfree_skb(sp->rx_desc[i].skb);
                        sp->rx_desc[i].skb = NULL;
                }
        }
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
        static int once;
        struct sgiseeq_rx_desc *r = gpriv->rx_desc;
        struct sgiseeq_tx_desc *t = gpriv->tx_desc;
        struct hpc3_ethregs *hregs = gpriv->hregs;
        int i;

        if (once)
                return;
        once++;
        printk("RING DUMP:\n");
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
        }
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
        }
        printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
               gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
        printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
               hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
        printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
               hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
{
        struct hpc3_ethregs *hregs = sp->hregs;
        int err;

        reset_hpc3_and_seeq(hregs, sregs);
        err = seeq_init_ring(dev);
        if (err)
                return err;

        /* Setup to field the proper interrupt types. */
        if (sp->is_edlc) {
                sregs->tstat = TSTAT_INIT_EDLC;
                sregs->rw.wregs.control = sp->control;
                sregs->rw.wregs.frame_gap = 0;
        } else {
                sregs->tstat = TSTAT_INIT_SEEQ;
        }

        hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
        hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

        seeq_go(sp, hregs, sregs);
        return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
        if (status & SEEQ_RSTAT_OVERF ||
            status & SEEQ_RSTAT_SFRAME)
                dev->stats.rx_over_errors++;
        if (status & SEEQ_RSTAT_CERROR)
                dev->stats.rx_crc_errors++;
        if (status & SEEQ_RSTAT_DERROR)
                dev->stats.rx_frame_errors++;
        if (status & SEEQ_RSTAT_REOF)
                dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    struct hpc3_ethregs *hregs,
                                    struct sgiseeq_regs *sregs)
{
        if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
                hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
                seeq_go(sp, hregs, sregs);
        }
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_rx_desc *rd;
        struct sk_buff *skb = NULL;
        struct sk_buff *newskb;
        unsigned char pkt_status;
        int len = 0;
        unsigned int orig_end = PREV_RX(sp->rx_new);

        /* Service every received packet. */
        rd = &sp->rx_desc[sp->rx_new];
        dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                pkt_status = rd->skb->data[len];
                if (pkt_status & SEEQ_RSTAT_FIG) {
                        /* Packet is OK. */
                        /* We don't want to receive our own packets */
                        if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
                                if (len > rx_copybreak) {
                                        skb = rd->skb;
                                        newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                                        if (!newskb) {
                                                newskb = skb;
                                                skb = NULL;
                                                goto memory_squeeze;
                                        }
                                        skb_reserve(newskb, 2);
                                } else {
                                        skb = netdev_alloc_skb_ip_align(dev, len);
                                        if (skb)
                                                skb_copy_to_linear_data(skb, rd->skb->data, len);

                                        newskb = rd->skb;
                                }
memory_squeeze:
                                if (skb) {
                                        skb_put(skb, len);
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                } else {
                                        dev->stats.rx_dropped++;
                                }
                        } else {
                                /* Silently drop my own packets */
                                newskb = rd->skb;
                        }
                } else {
                        record_rx_errors(dev, pkt_status);
                        newskb = rd->skb;
                }
                rd->skb = newskb;
                rd->rdma.pbuf = dma_map_single(dev->dev.parent,
                                               newskb->data - 2,
                                               PKT_BUF_SZ, DMA_FROM_DEVICE);

                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
                dma_sync_desc_dev(dev, rd);
                rd = &sp->rx_desc[sp->rx_new];
                dma_sync_desc_cpu(dev, rd);
        }
        dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
        dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
        dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
                                             struct sgiseeq_regs *sregs)
{
        if (sp->is_edlc) {
                sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
                sregs->rw.wregs.control = sp->control;
        }
}

static inline void kick_tx(struct net_device *dev,
                           struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs)
{
        struct sgiseeq_tx_desc *td;
        int i = sp->tx_old;
        /* If the HPC isn't doing anything and there are more packets
         * with ETXD cleared and XIU set, we must make very certain
         * that we restart the HPC, else we risk locking up the
         * adapter.  The following code is only safe iff the HPC DMA
         * is not active!
         */
        td = &sp->tx_desc[i];
        dma_sync_desc_cpu(dev, td);
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
              (HPCDMA_XIU | HPCDMA_ETXD)) {
                i = NEXT_TX(i);
                td = &sp->tx_desc[i];
                dma_sync_desc_cpu(dev, td);
        }
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
        }
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_tx_desc *td;
        unsigned long status = hregs->tx_ctrl;
        int j;

        tx_maybe_reset_collisions(sp, sregs);

        if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
                /* Oops, HPC detected some sort of error. */
                if (status & SEEQ_TSTAT_R16)
                        dev->stats.tx_aborted_errors++;
                if (status & SEEQ_TSTAT_UFLOW)
                        dev->stats.tx_fifo_errors++;
                if (status & SEEQ_TSTAT_LCLS)
                        dev->stats.collisions++;
        }

        /* Ack 'em... */
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];

                dma_sync_desc_cpu(dev, td);
                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                        if (!(status & HPC3_ETXCTRL_ACTIVE)) {
                                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                        }
                        break;
                }
                dev->stats.tx_packets++;
                sp->tx_old = NEXT_TX(sp->tx_old);
                td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                td->tdma.cntinfo |= HPCDMA_EOX;
                if (td->skb) {
                        dev_kfree_skb_any(td->skb);
                        td->skb = NULL;
                }
                dma_sync_desc_dev(dev, td);
        }
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        struct sgiseeq_regs *sregs = sp->sregs;

        spin_lock(&sp->tx_lock);

        /* Ack the IRQ and set software state. */
        hregs->reset = HPC3_ERST_CLRIRQ;

        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);

        /* Only check for tx acks if we have something queued. */
        if (sp->tx_old != sp->tx_new)
                sgiseeq_tx(dev, sp, hregs, sregs);

        if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev))
                netif_wake_queue(dev);

        spin_unlock(&sp->tx_lock);

        return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;
        int err;

        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
                return -EAGAIN;
        }

        err = init_seeq(dev, sp, sregs);
        if (err)
                goto out_free_irq;

        netif_start_queue(dev);

        return 0;

out_free_irq:
        free_irq(irq, dev);

        return err;
}

static int sgiseeq_close(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;

        netif_stop_queue(dev);

        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
        free_irq(irq, dev);
        seeq_purge_ring(dev);

        return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int err;

        err = init_seeq(dev, sp, sregs);
        if (err)
                return err;

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);

        return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        unsigned long flags;
        struct sgiseeq_tx_desc *td;
        int len, entry;

        spin_lock_irqsave(&sp->tx_lock, flags);

        /* Setup... */
        len = skb->len;
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN)) {
                        spin_unlock_irqrestore(&sp->tx_lock, flags);
                        return NETDEV_TX_OK;
                }
                len = ETH_ZLEN;
        }

        dev->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];
        dma_sync_desc_cpu(dev, td);

        /* Create entry.  There are so many races with adding a new
         * descriptor to the chain:
         * 1) Assume that the HPC is off processing a DMA chain while
         *    we are changing all of the following.
         * 2) Do not allow the HPC to look at a new descriptor until
         *    we have completely set up its state.  This means, do
         *    not clear HPCDMA_EOX in the current last descriptor
         *    until the one we are adding looks consistent and could
         *    be processed right now.
         * 3) The tx interrupt code must notice when we've added a new
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
        td->skb = skb;
        td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
                                       len, DMA_TO_DEVICE);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
        dma_sync_desc_dev(dev, td);
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;

                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
                dma_sync_desc_cpu(dev, backend);
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
                dma_sync_desc_dev(dev, backend);
        }
        sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

        /* Maybe kick the HPC back into motion. */
        if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
                kick_tx(dev, sp, hregs);

        if (!TX_BUFFS_AVAIL(sp))
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev)
{
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;

        if (dev->flags & IFF_PROMISC)
                sp->mode = SEEQ_RCMD_RANY;
        else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
                sp->mode = SEEQ_RCMD_RBMCAST;
        else
                sp->mode = SEEQ_RCMD_RBCAST;

        /* XXX I know this sucks, but is there a better way to reprogram
         * XXX the receiver? At least, this shouldn't happen too often.
         */

        if (oldmode != sp->mode)
                sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
                                 struct sgiseeq_tx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].tdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
                                 struct sgiseeq_rx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].rdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
        .ndo_open               = sgiseeq_open,
        .ndo_stop               = sgiseeq_close,
        .ndo_start_xmit         = sgiseeq_start_xmit,
        .ndo_tx_timeout         = timeout,
        .ndo_set_rx_mode        = sgiseeq_set_multicast,
        .ndo_set_mac_address    = sgiseeq_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
};

static int sgiseeq_probe(struct platform_device *pdev)
{
        struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
        struct hpc3_regs *hpcregs = pd->hpc;
        struct sgiseeq_init_block *sr;
        unsigned int irq = pd->irq;
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct sgiseeq_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_out;
        }

        platform_set_drvdata(pdev, dev);
        sp = netdev_priv(dev);

        /* Make private data page aligned */
        sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
                                   &sp->srings_dma, GFP_KERNEL);
        if (!sr) {
                printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        sp->srings = sr;
        sp->rx_desc = sp->srings->rxvector;
        sp->tx_desc = sp->srings->txvector;
        spin_lock_init(&sp->tx_lock);

        /* A couple calculations now, saves many cycles later. */
        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
        gpriv = sp;
        gdev = dev;
#endif
        sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
        sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;

        /* Setup PIO and DMA transfer timing */
        sp->hregs->pconfig = 0x161;
        sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
                             HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);

        sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
        if (sp->is_edlc)
                sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;

        dev->netdev_ops         = &sgiseeq_netdev_ops;
        dev->watchdog_timeo     = (200 * HZ) / 1000;
        dev->irq                = irq;

        if (register_netdev(dev)) {
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
                goto err_out_free_page;
        }

        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

        return 0;

err_out_free_page:
        /* The rings came from dma_alloc_noncoherent(), so return them
         * through dma_free_noncoherent(), not free_page().
         */
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
err_out_free_dev:
        free_netdev(dev);

err_out:
        return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct sgiseeq_private *sp = netdev_priv(dev);

        unregister_netdev(dev);
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
        free_netdev(dev);

        return 0;
}

static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = __exit_p(sgiseeq_remove),
        .driver = {
                .name   = "sgiseeq",
                .owner  = THIS_MODULE,
        }
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");
