
Linux/drivers/net/ethernet/cadence/macb.c

  1 /*
  2  * Cadence MACB/GEM Ethernet Controller driver
  3  *
  4  * Copyright (C) 2004-2006 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 #include <linux/clk.h>
 13 #include <linux/module.h>
 14 #include <linux/moduleparam.h>
 15 #include <linux/kernel.h>
 16 #include <linux/types.h>
 17 #include <linux/circ_buf.h>
 18 #include <linux/slab.h>
 19 #include <linux/init.h>
 20 #include <linux/io.h>
 21 #include <linux/gpio.h>
 22 #include <linux/interrupt.h>
 23 #include <linux/netdevice.h>
 24 #include <linux/etherdevice.h>
 25 #include <linux/dma-mapping.h>
 26 #include <linux/platform_data/macb.h>
 27 #include <linux/platform_device.h>
 28 #include <linux/phy.h>
 29 #include <linux/of.h>
 30 #include <linux/of_device.h>
 31 #include <linux/of_mdio.h>
 32 #include <linux/of_net.h>
 33 
 34 #include "macb.h"
 35 
 36 #define MACB_RX_BUFFER_SIZE     128
 37 #define RX_BUFFER_MULTIPLE      64  /* bytes */
 38 #define RX_RING_SIZE            512 /* must be power of 2 */
 39 #define RX_RING_BYTES           (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 40 
 41 #define TX_RING_SIZE            128 /* must be power of 2 */
 42 #define TX_RING_BYTES           (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 43 
 44 /* level of occupied TX descriptors under which we wake up TX process */
 45 #define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)
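
/* For illustration: with the TX_RING_SIZE of 128 defined above, the TX
 * queue is woken once no more than 3 * 128 / 4 = 96 descriptors are still
 * in use (see the CIRC_CNT() check in macb_tx_interrupt()).
 */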
 46 
 47 #define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
 48                                  | MACB_BIT(ISR_ROVR))
 49 #define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
 50                                         | MACB_BIT(ISR_RLE)             \
 51                                         | MACB_BIT(TXERR))
 52 #define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 53 
 54 #define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 55 #define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 56 
 57 /*
  58  * Graceful stop timeout in us. We should allow up to
  59  * 1 frame time (10 Mbit/s, full-duplex, ignoring collisions)
 60  */
 61 #define MACB_HALT_TIMEOUT       1230
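
/* Sanity check of that figure: a maximum-sized frame plus preamble/SFD and
 * inter-frame gap is (1518 + 8 + 12) * 8 = 12304 bit times, i.e. ~1230 us
 * at 10 Mbit/s.
 */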
 62 
 63 /* Ring buffer accessors */
 64 static unsigned int macb_tx_ring_wrap(unsigned int index)
 65 {
 66         return index & (TX_RING_SIZE - 1);
 67 }
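
/* Because the ring sizes are powers of two, masking with (size - 1) is a
 * cheap equivalent of index % size, e.g. macb_tx_ring_wrap(130) is
 * 130 & 127 == 2 on the 128-entry TX ring.
 */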
 68 
 69 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 70                                           unsigned int index)
 71 {
 72         return &queue->tx_ring[macb_tx_ring_wrap(index)];
 73 }
 74 
 75 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 76                                        unsigned int index)
 77 {
 78         return &queue->tx_skb[macb_tx_ring_wrap(index)];
 79 }
 80 
 81 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 82 {
 83         dma_addr_t offset;
 84 
 85         offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 86 
 87         return queue->tx_ring_dma + offset;
 88 }
 89 
 90 static unsigned int macb_rx_ring_wrap(unsigned int index)
 91 {
 92         return index & (RX_RING_SIZE - 1);
 93 }
 94 
 95 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 96 {
 97         return &bp->rx_ring[macb_rx_ring_wrap(index)];
 98 }
 99 
100 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
101 {
102         return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
103 }
104 
105 void macb_set_hwaddr(struct macb *bp)
106 {
107         u32 bottom;
108         u16 top;
109 
110         bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
111         macb_or_gem_writel(bp, SA1B, bottom);
112         top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
113         macb_or_gem_writel(bp, SA1T, top);
114 
115         /* Clear unused address register sets */
116         macb_or_gem_writel(bp, SA2B, 0);
117         macb_or_gem_writel(bp, SA2T, 0);
118         macb_or_gem_writel(bp, SA3B, 0);
119         macb_or_gem_writel(bp, SA3T, 0);
120         macb_or_gem_writel(bp, SA4B, 0);
121         macb_or_gem_writel(bp, SA4T, 0);
122 }
123 EXPORT_SYMBOL_GPL(macb_set_hwaddr);
124 
125 void macb_get_hwaddr(struct macb *bp)
126 {
127         struct macb_platform_data *pdata;
128         u32 bottom;
129         u16 top;
130         u8 addr[6];
131         int i;
132 
133         pdata = dev_get_platdata(&bp->pdev->dev);
134 
 135         /* Check all 4 address registers for a valid address */
136         for (i = 0; i < 4; i++) {
137                 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
138                 top = macb_or_gem_readl(bp, SA1T + i * 8);
139 
140                 if (pdata && pdata->rev_eth_addr) {
141                         addr[5] = bottom & 0xff;
142                         addr[4] = (bottom >> 8) & 0xff;
143                         addr[3] = (bottom >> 16) & 0xff;
144                         addr[2] = (bottom >> 24) & 0xff;
145                         addr[1] = top & 0xff;
146                         addr[0] = (top & 0xff00) >> 8;
147                 } else {
148                         addr[0] = bottom & 0xff;
149                         addr[1] = (bottom >> 8) & 0xff;
150                         addr[2] = (bottom >> 16) & 0xff;
151                         addr[3] = (bottom >> 24) & 0xff;
152                         addr[4] = top & 0xff;
153                         addr[5] = (top >> 8) & 0xff;
154                 }
155 
156                 if (is_valid_ether_addr(addr)) {
157                         memcpy(bp->dev->dev_addr, addr, sizeof(addr));
158                         return;
159                 }
160         }
161 
162         netdev_info(bp->dev, "invalid hw address, using random\n");
163         eth_hw_addr_random(bp->dev);
164 }
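
/* Packing example: for the address 00:11:22:33:44:55 stored in
 * dev_addr[0..5], the registers hold SA1B = 0x33221100 and SA1T = 0x5544
 * (little-endian byte order); with pdata->rev_eth_addr set, the same
 * registers are interpreted in the opposite byte order.
 */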
165 EXPORT_SYMBOL_GPL(macb_get_hwaddr);
166 
167 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
168 {
169         struct macb *bp = bus->priv;
170         int value;
171 
172         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
173                               | MACB_BF(RW, MACB_MAN_READ)
174                               | MACB_BF(PHYA, mii_id)
175                               | MACB_BF(REGA, regnum)
176                               | MACB_BF(CODE, MACB_MAN_CODE)));
177 
178         /* wait for end of transfer */
179         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
180                 cpu_relax();
181 
182         value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
183 
184         return value;
185 }
186 
187 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
188                            u16 value)
189 {
190         struct macb *bp = bus->priv;
191 
192         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
193                               | MACB_BF(RW, MACB_MAN_WRITE)
194                               | MACB_BF(PHYA, mii_id)
195                               | MACB_BF(REGA, regnum)
196                               | MACB_BF(CODE, MACB_MAN_CODE)
197                               | MACB_BF(DATA, value)));
198 
199         /* wait for end of transfer */
200         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
201                 cpu_relax();
202 
203         return 0;
204 }
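
/* The MAN register carries an IEEE 802.3 Clause 22 management frame.
 * Assuming the usual MACB_MAN_* values from macb.h, a read of register 2
 * on PHY address 1 is built as SOF = 0b01, RW = 0b10 (read), PHYA = 1,
 * REGA = 2, CODE = 0b10 (turnaround); the driver then polls NSR until
 * IDLE is set before extracting DATA.
 */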
205 
206 /**
 207  * macb_set_tx_clk() - Set the TX clock to match a new link speed
 208  * @clk:        Pointer to the clock to change
 209  * @speed:      New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 210  * @dev:        Pointer to the struct net_device
211  */
212 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
213 {
214         long ferr, rate, rate_rounded;
215 
216         switch (speed) {
217         case SPEED_10:
218                 rate = 2500000;
219                 break;
220         case SPEED_100:
221                 rate = 25000000;
222                 break;
223         case SPEED_1000:
224                 rate = 125000000;
225                 break;
226         default:
227                 return;
228         }
229 
230         rate_rounded = clk_round_rate(clk, rate);
231         if (rate_rounded < 0)
232                 return;
233 
234         /* RGMII allows 50 ppm frequency error. Test and warn if this limit
235          * is not satisfied.
236          */
237         ferr = abs(rate_rounded - rate);
238         ferr = DIV_ROUND_UP(ferr, rate / 100000);
239         if (ferr > 5)
240                 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
241                                 rate);
242 
243         if (clk_set_rate(clk, rate_rounded))
244                 netdev_err(dev, "adjusting tx_clk failed.\n");
245 }
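
/* Example of the 50 ppm check, assuming SPEED_100 and a clock that can
 * only round 25 MHz to 24999000 Hz: ferr = 1000, and
 * DIV_ROUND_UP(1000, 25000000 / 100000) = 4 units of 10 ppm, i.e. 40 ppm,
 * so no warning is emitted.
 */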
246 
247 static void macb_handle_link_change(struct net_device *dev)
248 {
249         struct macb *bp = netdev_priv(dev);
250         struct phy_device *phydev = bp->phy_dev;
251         unsigned long flags;
252 
253         int status_change = 0;
254 
255         spin_lock_irqsave(&bp->lock, flags);
256 
257         if (phydev->link) {
258                 if ((bp->speed != phydev->speed) ||
259                     (bp->duplex != phydev->duplex)) {
260                         u32 reg;
261 
262                         reg = macb_readl(bp, NCFGR);
263                         reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
264                         if (macb_is_gem(bp))
265                                 reg &= ~GEM_BIT(GBE);
266 
267                         if (phydev->duplex)
268                                 reg |= MACB_BIT(FD);
269                         if (phydev->speed == SPEED_100)
270                                 reg |= MACB_BIT(SPD);
271                         if (phydev->speed == SPEED_1000 &&
272                             bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
273                                 reg |= GEM_BIT(GBE);
274 
275                         macb_or_gem_writel(bp, NCFGR, reg);
276 
277                         bp->speed = phydev->speed;
278                         bp->duplex = phydev->duplex;
279                         status_change = 1;
280                 }
281         }
282 
283         if (phydev->link != bp->link) {
284                 if (!phydev->link) {
285                         bp->speed = 0;
286                         bp->duplex = -1;
287                 }
288                 bp->link = phydev->link;
289 
290                 status_change = 1;
291         }
292 
293         spin_unlock_irqrestore(&bp->lock, flags);
294 
295         if (!IS_ERR(bp->tx_clk))
296                 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
297 
298         if (status_change) {
299                 if (phydev->link) {
300                         netif_carrier_on(dev);
301                         netdev_info(dev, "link up (%d/%s)\n",
302                                     phydev->speed,
303                                     phydev->duplex == DUPLEX_FULL ?
304                                     "Full" : "Half");
305                 } else {
306                         netif_carrier_off(dev);
307                         netdev_info(dev, "link down\n");
308                 }
309         }
310 }
311 
 312 /* based on au1000_eth.c */
313 static int macb_mii_probe(struct net_device *dev)
314 {
315         struct macb *bp = netdev_priv(dev);
316         struct macb_platform_data *pdata;
317         struct phy_device *phydev;
318         int phy_irq;
319         int ret;
320 
321         phydev = phy_find_first(bp->mii_bus);
322         if (!phydev) {
323                 netdev_err(dev, "no PHY found\n");
324                 return -ENXIO;
325         }
326 
327         pdata = dev_get_platdata(&bp->pdev->dev);
328         if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
329                 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
330                 if (!ret) {
331                         phy_irq = gpio_to_irq(pdata->phy_irq_pin);
332                         phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
333                 }
334         }
335 
336         /* attach the mac to the phy */
337         ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
338                                  bp->phy_interface);
339         if (ret) {
340                 netdev_err(dev, "Could not attach to PHY\n");
341                 return ret;
342         }
343 
344         /* mask with MAC supported features */
345         if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
346                 phydev->supported &= PHY_GBIT_FEATURES;
347         else
348                 phydev->supported &= PHY_BASIC_FEATURES;
349 
350         phydev->advertising = phydev->supported;
351 
352         bp->link = 0;
353         bp->speed = 0;
354         bp->duplex = -1;
355         bp->phy_dev = phydev;
356 
357         return 0;
358 }
359 
360 int macb_mii_init(struct macb *bp)
361 {
362         struct macb_platform_data *pdata;
363         struct device_node *np;
364         int err = -ENXIO, i;
365 
366         /* Enable management port */
367         macb_writel(bp, NCR, MACB_BIT(MPE));
368 
369         bp->mii_bus = mdiobus_alloc();
370         if (bp->mii_bus == NULL) {
371                 err = -ENOMEM;
372                 goto err_out;
373         }
374 
375         bp->mii_bus->name = "MACB_mii_bus";
376         bp->mii_bus->read = &macb_mdio_read;
377         bp->mii_bus->write = &macb_mdio_write;
378         snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
379                 bp->pdev->name, bp->pdev->id);
380         bp->mii_bus->priv = bp;
381         bp->mii_bus->parent = &bp->dev->dev;
382         pdata = dev_get_platdata(&bp->pdev->dev);
383 
384         bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
385         if (!bp->mii_bus->irq) {
386                 err = -ENOMEM;
387                 goto err_out_free_mdiobus;
388         }
389 
390         dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
391 
392         np = bp->pdev->dev.of_node;
393         if (np) {
394                 /* try dt phy registration */
395                 err = of_mdiobus_register(bp->mii_bus, np);
396 
 397                 /* fall back to standard PHY registration if no PHY was
 398                  * found during DT PHY registration */
399                 if (!err && !phy_find_first(bp->mii_bus)) {
400                         for (i = 0; i < PHY_MAX_ADDR; i++) {
401                                 struct phy_device *phydev;
402 
403                                 phydev = mdiobus_scan(bp->mii_bus, i);
404                                 if (IS_ERR(phydev)) {
405                                         err = PTR_ERR(phydev);
406                                         break;
407                                 }
408                         }
409 
410                         if (err)
411                                 goto err_out_unregister_bus;
412                 }
413         } else {
414                 for (i = 0; i < PHY_MAX_ADDR; i++)
415                         bp->mii_bus->irq[i] = PHY_POLL;
416 
417                 if (pdata)
418                         bp->mii_bus->phy_mask = pdata->phy_mask;
419 
420                 err = mdiobus_register(bp->mii_bus);
421         }
422 
423         if (err)
424                 goto err_out_free_mdio_irq;
425 
426         err = macb_mii_probe(bp->dev);
427         if (err)
428                 goto err_out_unregister_bus;
429 
430         return 0;
431 
432 err_out_unregister_bus:
433         mdiobus_unregister(bp->mii_bus);
434 err_out_free_mdio_irq:
435         kfree(bp->mii_bus->irq);
436 err_out_free_mdiobus:
437         mdiobus_free(bp->mii_bus);
438 err_out:
439         return err;
440 }
441 EXPORT_SYMBOL_GPL(macb_mii_init);
442 
443 static void macb_update_stats(struct macb *bp)
444 {
445         u32 __iomem *reg = bp->regs + MACB_PFR;
446         u32 *p = &bp->hw_stats.macb.rx_pause_frames;
447         u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
448 
449         WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
450 
 451         for (; p < end; p++, reg++)
452                 *p += __raw_readl(reg);
453 }
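
/* This walk assumes the hardware statistics registers from MACB_PFR to
 * MACB_TPF are contiguous and mirrored, in the same order, by the u32
 * fields rx_pause_frames..tx_pause_frames of the hw_stats structure; the
 * WARN_ON() above fires if the two layouts ever drift apart.
 */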
454 
455 static int macb_halt_tx(struct macb *bp)
456 {
457         unsigned long   halt_time, timeout;
458         u32             status;
459 
460         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
461 
462         timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
463         do {
464                 halt_time = jiffies;
465                 status = macb_readl(bp, TSR);
466                 if (!(status & MACB_BIT(TGO)))
467                         return 0;
468 
469                 usleep_range(10, 250);
470         } while (time_before(halt_time, timeout));
471 
472         return -ETIMEDOUT;
473 }
474 
475 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
476 {
477         if (tx_skb->mapping) {
478                 if (tx_skb->mapped_as_page)
479                         dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
480                                        tx_skb->size, DMA_TO_DEVICE);
481                 else
482                         dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
483                                          tx_skb->size, DMA_TO_DEVICE);
484                 tx_skb->mapping = 0;
485         }
486 
487         if (tx_skb->skb) {
488                 dev_kfree_skb_any(tx_skb->skb);
489                 tx_skb->skb = NULL;
490         }
491 }
492 
493 static void macb_tx_error_task(struct work_struct *work)
494 {
495         struct macb_queue       *queue = container_of(work, struct macb_queue,
496                                                       tx_error_task);
497         struct macb             *bp = queue->bp;
498         struct macb_tx_skb      *tx_skb;
499         struct macb_dma_desc    *desc;
500         struct sk_buff          *skb;
501         unsigned int            tail;
502         unsigned long           flags;
503 
504         netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
505                     (unsigned int)(queue - bp->queues),
506                     queue->tx_tail, queue->tx_head);
507 
508         /* Prevent the queue IRQ handlers from running: each of them may call
509          * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
510          * As explained below, we have to halt the transmission before updating
511          * TBQP registers so we call netif_tx_stop_all_queues() to notify the
512          * network engine about the macb/gem being halted.
513          */
514         spin_lock_irqsave(&bp->lock, flags);
515 
516         /* Make sure nobody is trying to queue up new packets */
517         netif_tx_stop_all_queues(bp->dev);
518 
519         /*
520          * Stop transmission now
521          * (in case we have just queued new packets)
522          * macb/gem must be halted to write TBQP register
523          */
524         if (macb_halt_tx(bp))
 525                 /* Just complain for now; reinitializing the TX path might help */
526                 netdev_err(bp->dev, "BUG: halt tx timed out\n");
527 
528         /*
 529          * Walk the frames in the TX queue, including those that caused the
 530          * error, and free their transmit buffers back to the upper layer.
531          */
532         for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
533                 u32     ctrl;
534 
535                 desc = macb_tx_desc(queue, tail);
536                 ctrl = desc->ctrl;
537                 tx_skb = macb_tx_skb(queue, tail);
538                 skb = tx_skb->skb;
539 
540                 if (ctrl & MACB_BIT(TX_USED)) {
541                         /* skb is set for the last buffer of the frame */
542                         while (!skb) {
543                                 macb_tx_unmap(bp, tx_skb);
544                                 tail++;
545                                 tx_skb = macb_tx_skb(queue, tail);
546                                 skb = tx_skb->skb;
547                         }
548 
549                         /* ctrl still refers to the first buffer descriptor
550                          * since it's the only one written back by the hardware
551                          */
552                         if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
553                                 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
554                                             macb_tx_ring_wrap(tail), skb->data);
555                                 bp->stats.tx_packets++;
556                                 bp->stats.tx_bytes += skb->len;
557                         }
558                 } else {
559                         /*
560                          * "Buffers exhausted mid-frame" errors may only happen
561                          * if the driver is buggy, so complain loudly about those.
562                          * Statistics are updated by hardware.
563                          */
564                         if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
565                                 netdev_err(bp->dev,
566                                            "BUG: TX buffers exhausted mid-frame\n");
567 
568                         desc->ctrl = ctrl | MACB_BIT(TX_USED);
569                 }
570 
571                 macb_tx_unmap(bp, tx_skb);
572         }
573 
574         /* Set end of TX queue */
575         desc = macb_tx_desc(queue, 0);
576         desc->addr = 0;
577         desc->ctrl = MACB_BIT(TX_USED);
578 
579         /* Make descriptor updates visible to hardware */
580         wmb();
581 
582         /* Reinitialize the TX desc queue */
583         queue_writel(queue, TBQP, queue->tx_ring_dma);
584         /* Make TX ring reflect state of hardware */
585         queue->tx_head = 0;
586         queue->tx_tail = 0;
587 
588         /* Housework before enabling TX IRQ */
589         macb_writel(bp, TSR, macb_readl(bp, TSR));
590         queue_writel(queue, IER, MACB_TX_INT_FLAGS);
591 
592         /* Now we are ready to start transmission again */
593         netif_tx_start_all_queues(bp->dev);
594         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
595 
596         spin_unlock_irqrestore(&bp->lock, flags);
597 }
598 
599 static void macb_tx_interrupt(struct macb_queue *queue)
600 {
601         unsigned int tail;
602         unsigned int head;
603         u32 status;
604         struct macb *bp = queue->bp;
605         u16 queue_index = queue - bp->queues;
606 
607         status = macb_readl(bp, TSR);
608         macb_writel(bp, TSR, status);
609 
610         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
611                 queue_writel(queue, ISR, MACB_BIT(TCOMP));
612 
613         netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
614                 (unsigned long)status);
615 
616         head = queue->tx_head;
617         for (tail = queue->tx_tail; tail != head; tail++) {
618                 struct macb_tx_skb      *tx_skb;
619                 struct sk_buff          *skb;
620                 struct macb_dma_desc    *desc;
621                 u32                     ctrl;
622 
623                 desc = macb_tx_desc(queue, tail);
624 
625                 /* Make hw descriptor updates visible to CPU */
626                 rmb();
627 
628                 ctrl = desc->ctrl;
629 
630                 /* TX_USED bit is only set by hardware on the very first buffer
631                  * descriptor of the transmitted frame.
632                  */
633                 if (!(ctrl & MACB_BIT(TX_USED)))
634                         break;
635 
636                 /* Process all buffers of the current transmitted frame */
637                 for (;; tail++) {
638                         tx_skb = macb_tx_skb(queue, tail);
639                         skb = tx_skb->skb;
640 
641                         /* First, update TX stats if needed */
642                         if (skb) {
643                                 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
644                                             macb_tx_ring_wrap(tail), skb->data);
645                                 bp->stats.tx_packets++;
646                                 bp->stats.tx_bytes += skb->len;
647                         }
648 
649                         /* Now we can safely release resources */
650                         macb_tx_unmap(bp, tx_skb);
651 
652                         /* skb is set only for the last buffer of the frame.
653                          * WARNING: at this point skb has been freed by
654                          * macb_tx_unmap().
655                          */
656                         if (skb)
657                                 break;
658                 }
659         }
660 
661         queue->tx_tail = tail;
662         if (__netif_subqueue_stopped(bp->dev, queue_index) &&
663             CIRC_CNT(queue->tx_head, queue->tx_tail,
664                      TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
665                 netif_wake_subqueue(bp->dev, queue_index);
666 }
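
/* Wake-up example: with free-running indexes tx_head = 10 and
 * tx_tail = 100 on the 128-entry ring, CIRC_CNT(10, 100, 128) evaluates
 * to (10 - 100) & 127 = 38 occupied descriptors, which is below
 * MACB_TX_WAKEUP_THRESH (96), so the subqueue is restarted.
 */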
667 
668 static void gem_rx_refill(struct macb *bp)
669 {
670         unsigned int            entry;
671         struct sk_buff          *skb;
672         dma_addr_t              paddr;
673 
674         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
675                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
676 
677                 /* Make hw descriptor updates visible to CPU */
678                 rmb();
679 
680                 bp->rx_prepared_head++;
681 
682                 if (bp->rx_skbuff[entry] == NULL) {
683                         /* allocate sk_buff for this free entry in ring */
684                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
685                         if (unlikely(skb == NULL)) {
686                                 netdev_err(bp->dev,
687                                            "Unable to allocate sk_buff\n");
688                                 break;
689                         }
690 
691                         /* now fill corresponding descriptor entry */
692                         paddr = dma_map_single(&bp->pdev->dev, skb->data,
693                                                bp->rx_buffer_size, DMA_FROM_DEVICE);
694                         if (dma_mapping_error(&bp->pdev->dev, paddr)) {
695                                 dev_kfree_skb(skb);
696                                 break;
697                         }
698 
699                         bp->rx_skbuff[entry] = skb;
700 
701                         if (entry == RX_RING_SIZE - 1)
702                                 paddr |= MACB_BIT(RX_WRAP);
703                         bp->rx_ring[entry].addr = paddr;
704                         bp->rx_ring[entry].ctrl = 0;
705 
706                         /* properly align Ethernet header */
707                         skb_reserve(skb, NET_IP_ALIGN);
708                 }
709         }
710 
711         /* Make descriptor updates visible to hardware */
712         wmb();
713 
714         netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
715                    bp->rx_prepared_head, bp->rx_tail);
716 }
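
/* Refill example: with rx_prepared_head = 5 and rx_tail = 6 on the
 * 512-entry ring, CIRC_SPACE(5, 6, 512) = (6 - 5 - 1) & 511 = 0 and the
 * loop stops; one slot is always kept free so that head == tail can
 * unambiguously mean "empty".
 */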
717 
 718 /* Mark DMA descriptors from begin up to, but not including, end as unused */
719 static void discard_partial_frame(struct macb *bp, unsigned int begin,
720                                   unsigned int end)
721 {
722         unsigned int frag;
723 
724         for (frag = begin; frag != end; frag++) {
725                 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
726                 desc->addr &= ~MACB_BIT(RX_USED);
727         }
728 
729         /* Make descriptor updates visible to hardware */
730         wmb();
731 
732         /*
 733          * When this happens, the hardware stats registers for
 734          * whatever caused this are updated, so we don't have to record
 735          * anything.
736          */
737 }
738 
739 static int gem_rx(struct macb *bp, int budget)
740 {
741         unsigned int            len;
742         unsigned int            entry;
743         struct sk_buff          *skb;
744         struct macb_dma_desc    *desc;
745         int                     count = 0;
746 
747         while (count < budget) {
748                 u32 addr, ctrl;
749 
750                 entry = macb_rx_ring_wrap(bp->rx_tail);
751                 desc = &bp->rx_ring[entry];
752 
753                 /* Make hw descriptor updates visible to CPU */
754                 rmb();
755 
756                 addr = desc->addr;
757                 ctrl = desc->ctrl;
758 
759                 if (!(addr & MACB_BIT(RX_USED)))
760                         break;
761 
762                 bp->rx_tail++;
763                 count++;
764 
765                 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
766                         netdev_err(bp->dev,
767                                    "not whole frame pointed by descriptor\n");
768                         bp->stats.rx_dropped++;
769                         break;
770                 }
771                 skb = bp->rx_skbuff[entry];
772                 if (unlikely(!skb)) {
773                         netdev_err(bp->dev,
774                                    "inconsistent Rx descriptor chain\n");
775                         bp->stats.rx_dropped++;
776                         break;
777                 }
 778                 /* now everything is ready for receiving the packet */
779                 bp->rx_skbuff[entry] = NULL;
780                 len = MACB_BFEXT(RX_FRMLEN, ctrl);
781 
782                 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
783 
784                 skb_put(skb, len);
785                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
786                 dma_unmap_single(&bp->pdev->dev, addr,
787                                  bp->rx_buffer_size, DMA_FROM_DEVICE);
788 
789                 skb->protocol = eth_type_trans(skb, bp->dev);
790                 skb_checksum_none_assert(skb);
791                 if (bp->dev->features & NETIF_F_RXCSUM &&
792                     !(bp->dev->flags & IFF_PROMISC) &&
793                     GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
794                         skb->ip_summed = CHECKSUM_UNNECESSARY;
795 
796                 bp->stats.rx_packets++;
797                 bp->stats.rx_bytes += skb->len;
798 
799 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
800                 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
801                             skb->len, skb->csum);
802                 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
803                                skb_mac_header(skb), 16, true);
804                 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
805                                skb->data, 32, true);
806 #endif
807 
808                 netif_receive_skb(skb);
809         }
810 
811         gem_rx_refill(bp);
812 
813         return count;
814 }
815 
816 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
817                          unsigned int last_frag)
818 {
819         unsigned int len;
820         unsigned int frag;
821         unsigned int offset;
822         struct sk_buff *skb;
823         struct macb_dma_desc *desc;
824 
825         desc = macb_rx_desc(bp, last_frag);
826         len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
827 
828         netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
829                 macb_rx_ring_wrap(first_frag),
830                 macb_rx_ring_wrap(last_frag), len);
831 
832         /*
833          * The ethernet header starts NET_IP_ALIGN bytes into the
834          * first buffer. Since the header is 14 bytes, this makes the
835          * payload word-aligned.
836          *
837          * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
838          * the two padding bytes into the skb so that we avoid hitting
839          * the slowpath in memcpy(), and pull them off afterwards.
840          */
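        /* Concretely, with NET_IP_ALIGN == 2 (the common value): bytes 0-1
         * of the copied data are padding, the Ethernet header occupies
         * bytes 2-15, and the IP header lands word-aligned at offset 16;
         * the __skb_pull() below then drops the padding without moving
         * any memory.
         */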
841         skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
842         if (!skb) {
843                 bp->stats.rx_dropped++;
844                 for (frag = first_frag; ; frag++) {
845                         desc = macb_rx_desc(bp, frag);
846                         desc->addr &= ~MACB_BIT(RX_USED);
847                         if (frag == last_frag)
848                                 break;
849                 }
850 
851                 /* Make descriptor updates visible to hardware */
852                 wmb();
853 
854                 return 1;
855         }
856 
857         offset = 0;
858         len += NET_IP_ALIGN;
859         skb_checksum_none_assert(skb);
860         skb_put(skb, len);
861 
862         for (frag = first_frag; ; frag++) {
863                 unsigned int frag_len = bp->rx_buffer_size;
864 
865                 if (offset + frag_len > len) {
866                         BUG_ON(frag != last_frag);
867                         frag_len = len - offset;
868                 }
869                 skb_copy_to_linear_data_offset(skb, offset,
870                                 macb_rx_buffer(bp, frag), frag_len);
871                 offset += bp->rx_buffer_size;
872                 desc = macb_rx_desc(bp, frag);
873                 desc->addr &= ~MACB_BIT(RX_USED);
874 
875                 if (frag == last_frag)
876                         break;
877         }
878 
879         /* Make descriptor updates visible to hardware */
880         wmb();
881 
882         __skb_pull(skb, NET_IP_ALIGN);
883         skb->protocol = eth_type_trans(skb, bp->dev);
884 
885         bp->stats.rx_packets++;
886         bp->stats.rx_bytes += skb->len;
887         netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
888                    skb->len, skb->csum);
889         netif_receive_skb(skb);
890 
891         return 0;
892 }
893 
894 static int macb_rx(struct macb *bp, int budget)
895 {
896         int received = 0;
897         unsigned int tail;
898         int first_frag = -1;
899 
900         for (tail = bp->rx_tail; budget > 0; tail++) {
901                 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
902                 u32 addr, ctrl;
903 
904                 /* Make hw descriptor updates visible to CPU */
905                 rmb();
906 
907                 addr = desc->addr;
908                 ctrl = desc->ctrl;
909 
910                 if (!(addr & MACB_BIT(RX_USED)))
911                         break;
912 
913                 if (ctrl & MACB_BIT(RX_SOF)) {
914                         if (first_frag != -1)
915                                 discard_partial_frame(bp, first_frag, tail);
916                         first_frag = tail;
917                 }
918 
919                 if (ctrl & MACB_BIT(RX_EOF)) {
920                         int dropped;
921                         BUG_ON(first_frag == -1);
922 
923                         dropped = macb_rx_frame(bp, first_frag, tail);
924                         first_frag = -1;
925                         if (!dropped) {
926                                 received++;
927                                 budget--;
928                         }
929                 }
930         }
931 
932         if (first_frag != -1)
933                 bp->rx_tail = first_frag;
934         else
935                 bp->rx_tail = tail;
936 
937         return received;
938 }
939 
940 static int macb_poll(struct napi_struct *napi, int budget)
941 {
942         struct macb *bp = container_of(napi, struct macb, napi);
943         int work_done;
944         u32 status;
945 
946         status = macb_readl(bp, RSR);
947         macb_writel(bp, RSR, status);
948 
949         work_done = 0;
950 
951         netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
952                    (unsigned long)status, budget);
953 
954         work_done = bp->macbgem_ops.mog_rx(bp, budget);
955         if (work_done < budget) {
956                 napi_complete(napi);
957 
958                 /* Packets received while interrupts were disabled */
959                 status = macb_readl(bp, RSR);
960                 if (status) {
961                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
962                                 macb_writel(bp, ISR, MACB_BIT(RCOMP));
963                         napi_reschedule(napi);
964                 } else {
965                         macb_writel(bp, IER, MACB_RX_INT_FLAGS);
966                 }
967         }
968 
969         /* TODO: Handle errors */
970 
971         return work_done;
972 }
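
/* The RSR re-read after napi_complete() closes a race: frames arriving
 * after the budget loop finished but before RX interrupts are re-enabled
 * would otherwise sit in the ring until some later, unrelated interrupt.
 */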
973 
974 static irqreturn_t macb_interrupt(int irq, void *dev_id)
975 {
976         struct macb_queue *queue = dev_id;
977         struct macb *bp = queue->bp;
978         struct net_device *dev = bp->dev;
979         u32 status;
980 
981         status = queue_readl(queue, ISR);
982 
983         if (unlikely(!status))
984                 return IRQ_NONE;
985 
986         spin_lock(&bp->lock);
987 
988         while (status) {
989                 /* close possible race with dev_close */
990                 if (unlikely(!netif_running(dev))) {
991                         queue_writel(queue, IDR, -1);
992                         break;
993                 }
994 
995                 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
996                             (unsigned int)(queue - bp->queues),
997                             (unsigned long)status);
998 
999                 if (status & MACB_RX_INT_FLAGS) {
1000                         /*
1001                          * There's no point taking any more interrupts
1002                          * until we have processed the buffers. The
1003                          * scheduling call may fail if the poll routine
1004                          * is already scheduled, so disable interrupts
1005                          * now.
1006                          */
1007                         queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1008                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1009                                 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1010 
1011                         if (napi_schedule_prep(&bp->napi)) {
1012                                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1013                                 __napi_schedule(&bp->napi);
1014                         }
1015                 }
1016 
1017                 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1018                         queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1019                         schedule_work(&queue->tx_error_task);
1020 
1021                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1022                                 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1023 
1024                         break;
1025                 }
1026 
1027                 if (status & MACB_BIT(TCOMP))
1028                         macb_tx_interrupt(queue);
1029 
1030                 /*
1031                  * Link change detection isn't possible with RMII, so we'll
1032                  * add that if/when we get our hands on a full-blown MII PHY.
1033                  */
1034 
1035                 if (status & MACB_BIT(ISR_ROVR)) {
1036                         /* We missed at least one packet */
1037                         if (macb_is_gem(bp))
1038                                 bp->hw_stats.gem.rx_overruns++;
1039                         else
1040                                 bp->hw_stats.macb.rx_overruns++;
1041 
1042                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1043                                 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1044                 }
1045 
1046                 if (status & MACB_BIT(HRESP)) {
1047                         /*
1048                          * TODO: Reset the hardware, and maybe move the
1049                          * netdev_err to a lower-priority context as well
1050                          * (work queue?)
1051                          */
1052                         netdev_err(dev, "DMA bus error: HRESP not OK\n");
1053 
1054                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1055                                 queue_writel(queue, ISR, MACB_BIT(HRESP));
1056                 }
1057 
1058                 status = queue_readl(queue, ISR);
1059         }
1060 
1061         spin_unlock(&bp->lock);
1062 
1063         return IRQ_HANDLED;
1064 }
1065 
1066 #ifdef CONFIG_NET_POLL_CONTROLLER
1067 /*
1068  * Polling receive - used by netconsole and other diagnostic tools
1069  * to allow network i/o with interrupts disabled.
1070  */
1071 static void macb_poll_controller(struct net_device *dev)
1072 {
1073         struct macb *bp = netdev_priv(dev);
1074         struct macb_queue *queue;
1075         unsigned long flags;
1076         unsigned int q;
1077 
1078         local_irq_save(flags);
1079         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1080                 macb_interrupt(dev->irq, queue);
1081         local_irq_restore(flags);
1082 }
1083 #endif
1084 
1085 static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1086                                                      unsigned int len)
1087 {
1088         return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1089 }
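
/* Example, assuming the MACB limit of max_tx_length = 2047 bytes derived
 * from MACB_TX_FRMLEN_SIZE: a 3000-byte linear buffer needs
 * (3000 + 2046) / 2047 = 2 buffer descriptors.
 */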
1090 
1091 static unsigned int macb_tx_map(struct macb *bp,
1092                                 struct macb_queue *queue,
1093                                 struct sk_buff *skb)
1094 {
1095         dma_addr_t mapping;
1096         unsigned int len, entry, i, tx_head = queue->tx_head;
1097         struct macb_tx_skb *tx_skb = NULL;
1098         struct macb_dma_desc *desc;
1099         unsigned int offset, size, count = 0;
1100         unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1101         unsigned int eof = 1;
1102         u32 ctrl;
1103 
1104         /* First, map non-paged data */
1105         len = skb_headlen(skb);
1106         offset = 0;
1107         while (len) {
1108                 size = min(len, bp->max_tx_length);
1109                 entry = macb_tx_ring_wrap(tx_head);
1110                 tx_skb = &queue->tx_skb[entry];
1111 
1112                 mapping = dma_map_single(&bp->pdev->dev,
1113                                          skb->data + offset,
1114                                          size, DMA_TO_DEVICE);
1115                 if (dma_mapping_error(&bp->pdev->dev, mapping))
1116                         goto dma_error;
1117 
1118                 /* Save info to properly release resources */
1119                 tx_skb->skb = NULL;
1120                 tx_skb->mapping = mapping;
1121                 tx_skb->size = size;
1122                 tx_skb->mapped_as_page = false;
1123 
1124                 len -= size;
1125                 offset += size;
1126                 count++;
1127                 tx_head++;
1128         }
1129 
1130         /* Then, map paged data from fragments */
1131         for (f = 0; f < nr_frags; f++) {
1132                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1133 
1134                 len = skb_frag_size(frag);
1135                 offset = 0;
1136                 while (len) {
1137                         size = min(len, bp->max_tx_length);
1138                         entry = macb_tx_ring_wrap(tx_head);
1139                         tx_skb = &queue->tx_skb[entry];
1140 
1141                         mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1142                                                    offset, size, DMA_TO_DEVICE);
1143                         if (dma_mapping_error(&bp->pdev->dev, mapping))
1144                                 goto dma_error;
1145 
1146                         /* Save info to properly release resources */
1147                         tx_skb->skb = NULL;
1148                         tx_skb->mapping = mapping;
1149                         tx_skb->size = size;
1150                         tx_skb->mapped_as_page = true;
1151 
1152                         len -= size;
1153                         offset += size;
1154                         count++;
1155                         tx_head++;
1156                 }
1157         }
1158 
1159         /* Should never happen */
1160         if (unlikely(tx_skb == NULL)) {
1161                 netdev_err(bp->dev, "BUG! empty skb!\n");
1162                 return 0;
1163         }
1164 
1165         /* This is the last buffer of the frame: save socket buffer */
1166         tx_skb->skb = skb;
1167 
 1168         /* Update TX ring: update buffer descriptors in reverse order to
 1169          * avoid a race: hardware starts fetching once TX_USED is cleared
 1170          * in the first descriptor, so that bit must be cleared last. */
1171 
1172         /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1173          * to set the end of TX queue
1174          */
1175         i = tx_head;
1176         entry = macb_tx_ring_wrap(i);
1177         ctrl = MACB_BIT(TX_USED);
1178         desc = &queue->tx_ring[entry];
1179         desc->ctrl = ctrl;
1180 
1181         do {
1182                 i--;
1183                 entry = macb_tx_ring_wrap(i);
1184                 tx_skb = &queue->tx_skb[entry];
1185                 desc = &queue->tx_ring[entry];
1186 
1187                 ctrl = (u32)tx_skb->size;
1188                 if (eof) {
1189                         ctrl |= MACB_BIT(TX_LAST);
1190                         eof = 0;
1191                 }
1192                 if (unlikely(entry == (TX_RING_SIZE - 1)))
1193                         ctrl |= MACB_BIT(TX_WRAP);
1194 
1195                 /* Set TX buffer descriptor */
1196                 desc->addr = tx_skb->mapping;
1197                 /* desc->addr must be visible to hardware before clearing
1198                  * 'TX_USED' bit in desc->ctrl.
1199                  */
1200                 wmb();
1201                 desc->ctrl = ctrl;
1202         } while (i != queue->tx_head);
1203 
1204         queue->tx_head = tx_head;
1205 
1206         return count;
1207 
1208 dma_error:
1209         netdev_err(bp->dev, "TX DMA map failed\n");
1210 
1211         for (i = queue->tx_head; i != tx_head; i++) {
1212                 tx_skb = macb_tx_skb(queue, i);
1213 
1214                 macb_tx_unmap(bp, tx_skb);
1215         }
1216 
1217         return 0;
1218 }
1219 
1220 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1221 {
1222         u16 queue_index = skb_get_queue_mapping(skb);
1223         struct macb *bp = netdev_priv(dev);
1224         struct macb_queue *queue = &bp->queues[queue_index];
1225         unsigned long flags;
1226         unsigned int count, nr_frags, frag_size, f;
1227 
1228 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1229         netdev_vdbg(bp->dev,
1230                    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1231                    queue_index, skb->len, skb->head, skb->data,
1232                    skb_tail_pointer(skb), skb_end_pointer(skb));
1233         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1234                        skb->data, 16, true);
1235 #endif
1236 
1237         /* Count how many TX buffer descriptors are needed to send this
1238          * socket buffer: skb fragments of jumbo frames may need to be
 1239          * split across several buffer descriptors.
1240          */
1241         count = macb_count_tx_descriptors(bp, skb_headlen(skb));
1242         nr_frags = skb_shinfo(skb)->nr_frags;
1243         for (f = 0; f < nr_frags; f++) {
1244                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1245                 count += macb_count_tx_descriptors(bp, frag_size);
1246         }
1247 
1248         spin_lock_irqsave(&bp->lock, flags);
1249 
1250         /* This is a hard error, log it. */
1251         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1252                 netif_stop_subqueue(dev, queue_index);
1253                 spin_unlock_irqrestore(&bp->lock, flags);
1254                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1255                            queue->tx_head, queue->tx_tail);
1256                 return NETDEV_TX_BUSY;
1257         }
1258 
1259         /* Map socket buffer for DMA transfer */
1260         if (!macb_tx_map(bp, queue, skb)) {
1261                 dev_kfree_skb_any(skb);
1262                 goto unlock;
1263         }
1264 
1265         /* Make newly initialized descriptor visible to hardware */
1266         wmb();
1267 
1268         skb_tx_timestamp(skb);
1269 
1270         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1271 
1272         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1273                 netif_stop_subqueue(dev, queue_index);
1274 
1275 unlock:
1276         spin_unlock_irqrestore(&bp->lock, flags);
1277 
1278         return NETDEV_TX_OK;
1279 }
1280 
1281 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1282 {
1283         if (!macb_is_gem(bp)) {
1284                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1285         } else {
1286                 bp->rx_buffer_size = size;
1287 
1288                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1289                         netdev_dbg(bp->dev,
1290                                     "RX buffer must be multiple of %d bytes, expanding\n",
1291                                     RX_BUFFER_MULTIPLE);
1292                         bp->rx_buffer_size =
1293                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1294                 }
1295         }
1296 
 1297         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1298                    bp->dev->mtu, bp->rx_buffer_size);
1299 }
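
/* Rounding example for GEM, assuming the caller requests 1522 bytes:
 * 1522 is not a multiple of RX_BUFFER_MULTIPLE (64), so it is expanded
 * to roundup(1522, 64) = 1536.
 */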
1300 
1301 static void gem_free_rx_buffers(struct macb *bp)
1302 {
1303         struct sk_buff          *skb;
1304         struct macb_dma_desc    *desc;
1305         dma_addr_t              addr;
1306         int i;
1307 
1308         if (!bp->rx_skbuff)
1309                 return;
1310 
1311         for (i = 0; i < RX_RING_SIZE; i++) {
1312                 skb = bp->rx_skbuff[i];
1313 
1314                 if (skb == NULL)
1315                         continue;
1316 
1317                 desc = &bp->rx_ring[i];
1318                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1319                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1320                                  DMA_FROM_DEVICE);
1321                 dev_kfree_skb_any(skb);
1322                 skb = NULL;
1323         }
1324 
1325         kfree(bp->rx_skbuff);
1326         bp->rx_skbuff = NULL;
1327 }
1328 
1329 static void macb_free_rx_buffers(struct macb *bp)
1330 {
1331         if (bp->rx_buffers) {
1332                 dma_free_coherent(&bp->pdev->dev,
1333                                   RX_RING_SIZE * bp->rx_buffer_size,
1334                                   bp->rx_buffers, bp->rx_buffers_dma);
1335                 bp->rx_buffers = NULL;
1336         }
1337 }
1338 
1339 static void macb_free_consistent(struct macb *bp)
1340 {
1341         struct macb_queue *queue;
1342         unsigned int q;
1343 
1344         bp->macbgem_ops.mog_free_rx_buffers(bp);
1345         if (bp->rx_ring) {
1346                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1347                                   bp->rx_ring, bp->rx_ring_dma);
1348                 bp->rx_ring = NULL;
1349         }
1350 
1351         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1352                 kfree(queue->tx_skb);
1353                 queue->tx_skb = NULL;
1354                 if (queue->tx_ring) {
1355                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1356                                           queue->tx_ring, queue->tx_ring_dma);
1357                         queue->tx_ring = NULL;
1358                 }
1359         }
1360 }
1361 
1362 static int gem_alloc_rx_buffers(struct macb *bp)
1363 {
1364         int size;
1365 
1366         size = RX_RING_SIZE * sizeof(struct sk_buff *);
1367         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1368         if (!bp->rx_skbuff)
1369                 return -ENOMEM;
1370         else
1371                 netdev_dbg(bp->dev,
1372                            "Allocated %d RX struct sk_buff entries at %p\n",
1373                            RX_RING_SIZE, bp->rx_skbuff);
1374         return 0;
1375 }
1376 
1377 static int macb_alloc_rx_buffers(struct macb *bp)
1378 {
1379         int size;
1380 
1381         size = RX_RING_SIZE * bp->rx_buffer_size;
1382         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1383                                             &bp->rx_buffers_dma, GFP_KERNEL);
1384         if (!bp->rx_buffers)
1385                 return -ENOMEM;
1386         else
1387                 netdev_dbg(bp->dev,
1388                            "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1389                            size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1390         return 0;
1391 }
1392 
1393 static int macb_alloc_consistent(struct macb *bp)
1394 {
1395         struct macb_queue *queue;
1396         unsigned int q;
1397         int size;
1398 
1399         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1400                 size = TX_RING_BYTES;
1401                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1402                                                     &queue->tx_ring_dma,
1403                                                     GFP_KERNEL);
1404                 if (!queue->tx_ring)
1405                         goto out_err;
1406                 netdev_dbg(bp->dev,
1407                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1408                            q, size, (unsigned long)queue->tx_ring_dma,
1409                            queue->tx_ring);
1410 
1411                 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1412                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1413                 if (!queue->tx_skb)
1414                         goto out_err;
1415         }
1416 
1417         size = RX_RING_BYTES;
1418         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1419                                          &bp->rx_ring_dma, GFP_KERNEL);
1420         if (!bp->rx_ring)
1421                 goto out_err;
1422         netdev_dbg(bp->dev,
1423                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1424                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1425 
1426         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1427                 goto out_err;
1428 
1429         return 0;
1430 
1431 out_err:
1432         macb_free_consistent(bp);
1433         return -ENOMEM;
1434 }
1435 
1436 static void gem_init_rings(struct macb *bp)
1437 {
1438         struct macb_queue *queue;
1439         unsigned int q;
1440         int i;
1441 
1442         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1443                 for (i = 0; i < TX_RING_SIZE; i++) {
1444                         queue->tx_ring[i].addr = 0;
1445                         queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1446                 }
1447                 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1448                 queue->tx_head = 0;
1449                 queue->tx_tail = 0;
1450         }
1451 
1452         bp->rx_tail = 0;
1453         bp->rx_prepared_head = 0;
1454 
1455         gem_rx_refill(bp);
1456 }
1457 
1458 static void macb_init_rings(struct macb *bp)
1459 {
1460         int i;
1461         dma_addr_t addr;
1462 
1463         addr = bp->rx_buffers_dma;
1464         for (i = 0; i < RX_RING_SIZE; i++) {
1465                 bp->rx_ring[i].addr = addr;
1466                 bp->rx_ring[i].ctrl = 0;
1467                 addr += bp->rx_buffer_size;
1468         }
1469         bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1470 
1471         for (i = 0; i < TX_RING_SIZE; i++) {
1472                 bp->queues[0].tx_ring[i].addr = 0;
1473                 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1474                 bp->queues[0].tx_head = 0;
1475                 bp->queues[0].tx_tail = 0;
1476         }
1477         bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1478 
1479         bp->rx_tail = 0;
1480 }
1481 
1482 static void macb_reset_hw(struct macb *bp)
1483 {
1484         struct macb_queue *queue;
1485         unsigned int q;
1486 
1487         /*
1488          * Disable RX and TX (XXX: Should we halt the transmission
1489          * more gracefully?)
1490          */
1491         macb_writel(bp, NCR, 0);
1492 
1493         /* Clear the stats registers (XXX: Update stats first?) */
1494         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1495 
1496         /* Clear all status flags */
1497         macb_writel(bp, TSR, -1);
1498         macb_writel(bp, RSR, -1);
1499 
1500         /* Disable all interrupts */
1501         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1502                 queue_writel(queue, IDR, -1);
1503                 queue_readl(queue, ISR);
1504         }
1505 }
1506 
1507 static u32 gem_mdc_clk_div(struct macb *bp)
1508 {
1509         u32 config;
1510         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1511 
1512         if (pclk_hz <= 20000000)
1513                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1514         else if (pclk_hz <= 40000000)
1515                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1516         else if (pclk_hz <= 80000000)
1517                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1518         else if (pclk_hz <= 120000000)
1519                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1520         else if (pclk_hz <= 160000000)
1521                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1522         else
1523                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1524 
1525         return config;
1526 }
1527 
1528 static u32 macb_mdc_clk_div(struct macb *bp)
1529 {
1530         u32 config;
1531         unsigned long pclk_hz;
1532 
1533         if (macb_is_gem(bp))
1534                 return gem_mdc_clk_div(bp);
1535 
1536         pclk_hz = clk_get_rate(bp->pclk);
1537         if (pclk_hz <= 20000000)
1538                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1539         else if (pclk_hz <= 40000000)
1540                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1541         else if (pclk_hz <= 80000000)
1542                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1543         else
1544                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1545 
1546         return config;
1547 }
1548 
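     /*
      * Editor's note, a worked example (my arithmetic, not vendor text):
      * these dividers presumably keep MDC at or below the 2.5 MHz ceiling
      * of IEEE 802.3 clause 22.  With a 133 MHz pclk, the GEM path above
      * selects GEM_CLK_DIV64 and the MACB path MACB_CLK_DIV64, giving
      * MDC = 133 MHz / 64 ~ 2.08 MHz.
      */
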
1549 /*
1550  * Get the DMA bus width field of the network configuration register that we
1551  * should program.  We find the width by decoding the design configuration
1552  * register, which reports the maximum supported data bus width.
1553  */
1554 static u32 macb_dbw(struct macb *bp)
1555 {
1556         if (!macb_is_gem(bp))
1557                 return 0;
1558 
1559         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1560         case 4:
1561                 return GEM_BF(DBW, GEM_DBW128);
1562         case 2:
1563                 return GEM_BF(DBW, GEM_DBW64);
1564         case 1:
1565         default:
1566                 return GEM_BF(DBW, GEM_DBW32);
1567         }
1568 }
1569 
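     /*
      * Editor's note: for example, a GEM synthesized with a 64-bit maximum
      * data bus width reports DBWDEF == 2 in DCFG1 and is programmed with
      * GEM_DBW64; plain MACB has no such field, hence the early return of
      * zero above.
      */
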
1570 /*
1571  * Configure the receive DMA engine
1572  * - use the correct receive buffer size
1573  * - set best burst length for DMA operations
1574  *   (if not supported by the FIFO, it falls back to the default)
1575  * - set both rx/tx packet buffers to full memory size
1576  * These are configurable parameters for GEM.
1577  */
1578 static void macb_configure_dma(struct macb *bp)
1579 {
1580         u32 dmacfg;
1581 
1582         if (macb_is_gem(bp)) {
1583                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1584                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1585                 if (bp->dma_burst_length)
1586                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1587                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1588                 dmacfg &= ~GEM_BIT(ENDIA);
1589                 if (bp->dev->features & NETIF_F_HW_CSUM)
1590                         dmacfg |= GEM_BIT(TXCOEN);
1591                 else
1592                         dmacfg &= ~GEM_BIT(TXCOEN);
1593                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1594                            dmacfg);
1595                 gem_writel(bp, DMACFG, dmacfg);
1596         }
1597 }
1598 
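     /*
      * Editor's note, a worked example (my arithmetic): RXBS is in units
      * of RX_BUFFER_MULTIPLE (64) bytes.  macb_open() asks for a buffer of
      * mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN bytes, i.e.
      * 1500 + 14 + 4 + 2 = 1520 for the default MTU; assuming
      * macb_init_rx_buffer_size() rounds that up to the next multiple of
      * 64, bp->rx_buffer_size becomes 1536 and RXBS is written as
      * 1536 / 64 = 24.
      */
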
1599 static void macb_init_hw(struct macb *bp)
1600 {
1601         struct macb_queue *queue;
1602         unsigned int q;
1603 
1604         u32 config;
1605 
1606         macb_reset_hw(bp);
1607         macb_set_hwaddr(bp);
1608 
1609         config = macb_mdc_clk_div(bp);
1610         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1611         config |= MACB_BIT(PAE);                /* PAuse Enable */
1612         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1613         config |= MACB_BIT(BIG);                /* Receive oversized frames */
1614         if (bp->dev->flags & IFF_PROMISC)
1615                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1616         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1617                 config |= GEM_BIT(RXCOEN);
1618         if (!(bp->dev->flags & IFF_BROADCAST))
1619                 config |= MACB_BIT(NBC);        /* No BroadCast */
1620         config |= macb_dbw(bp);
1621         macb_writel(bp, NCFGR, config);
1622         bp->speed = SPEED_10;
1623         bp->duplex = DUPLEX_HALF;
1624 
1625         macb_configure_dma(bp);
1626 
1627         /* Initialize TX and RX buffers */
1628         macb_writel(bp, RBQP, bp->rx_ring_dma);
1629         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1630                 queue_writel(queue, TBQP, queue->tx_ring_dma);
1631 
1632                 /* Enable interrupts */
1633                 queue_writel(queue, IER,
1634                              MACB_RX_INT_FLAGS |
1635                              MACB_TX_INT_FLAGS |
1636                              MACB_BIT(HRESP));
1637         }
1638 
1639         /* Enable TX and RX */
1640         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1641 }
1642 
1643 /*
1644  * The hash address register is 64 bits long and takes up two
1645  * locations in the memory map.  The least significant bits are stored
1646  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1647  *
1648  * The unicast hash enable and the multicast hash enable bits in the
1649  * network configuration register enable the reception of hash matched
1650  * frames. The destination address is reduced to a 6 bit index into
1651  * the 64 bit hash register using the following hash function.  The
1652  * hash function is an exclusive or of every sixth bit of the
1653  * destination address.
1654  *
1655  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1656  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1657  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1658  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1659  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1660  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1661  *
1662  * da[0] represents the least significant bit of the first byte
1663  * received, that is, the multicast/unicast indicator, and da[47]
1664  * represents the most significant bit of the last byte received.  If
1665  * the hash index, hi[n], points to a bit that is set in the hash
1666  * register then the frame will be matched according to whether the
1667  * frame is multicast or unicast.  A multicast match will be signalled
1668  * if the multicast hash enable bit is set, da[0] is 1 and the hash
1669  * index points to a bit set in the hash register.  A unicast match
1670  * will be signalled if the unicast hash enable bit is set, da[0] is 0
1671  * and the hash index points to a bit set in the hash register.  To
1672  * receive all multicast frames, the hash register should be set with
1673  * all ones and the multicast hash enable bit should be set in the
1674  * network configuration register.
1675  */
1676 
1677 static inline int hash_bit_value(int bitnr, __u8 *addr)
1678 {
1679         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1680                 return 1;
1681         return 0;
1682 }
1683 
1684 /*
1685  * Return the hash index value for the specified address.
1686  */
1687 static int hash_get_index(__u8 *addr)
1688 {
1689         int i, j, bitval;
1690         int hash_index = 0;
1691 
1692         for (j = 0; j < 6; j++) {
1693                 for (i = 0, bitval = 0; i < 8; i++)
1694                         bitval ^= hash_bit_value(i * 6 + j, addr);
1695 
1696                 hash_index |= (bitval << j);
1697         }
1698 
1699         return hash_index;
1700 }
1701 
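     /*
      * Editor's note, a worked example (computed by hand from the formula
      * above, not copied from documentation): for the IPv4 all-hosts
      * multicast address 01:00:5e:00:00:01, the set bits of da[] are
      * {0, 17, 18, 19, 20, 22, 40}.  Grouping the bit numbers by their
      * value modulo 6 and XOR-ing each group gives hi[5..0] = 0b100110,
      * so hash_get_index() returns 38 and macb_sethashtable() below sets
      * bit 38 & 31 = 6 of mc_filter[38 >> 5] = mc_filter[1], which is
      * written to HRT.
      */
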
1702 /*
1703  * Add multicast addresses to the internal multicast-hash table.
1704  */
1705 static void macb_sethashtable(struct net_device *dev)
1706 {
1707         struct netdev_hw_addr *ha;
1708         unsigned long mc_filter[2];
1709         unsigned int bitnr;
1710         struct macb *bp = netdev_priv(dev);
1711 
1712         mc_filter[0] = mc_filter[1] = 0;
1713 
1714         netdev_for_each_mc_addr(ha, dev) {
1715                 bitnr = hash_get_index(ha->addr);
1716                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1717         }
1718 
1719         macb_or_gem_writel(bp, HRB, mc_filter[0]);
1720         macb_or_gem_writel(bp, HRT, mc_filter[1]);
1721 }
1722 
1723 /*
1724  * Enable/Disable promiscuous and multicast modes.
1725  */
1726 void macb_set_rx_mode(struct net_device *dev)
1727 {
1728         unsigned long cfg;
1729         struct macb *bp = netdev_priv(dev);
1730 
1731         cfg = macb_readl(bp, NCFGR);
1732 
1733         if (dev->flags & IFF_PROMISC) {
1734                 /* Enable promiscuous mode */
1735                 cfg |= MACB_BIT(CAF);
1736 
1737                 /* Disable RX checksum offload */
1738                 if (macb_is_gem(bp))
1739                         cfg &= ~GEM_BIT(RXCOEN);
1740         } else {
1741                 /* Disable promiscuous mode */
1742                 cfg &= ~MACB_BIT(CAF);
1743 
1744                 /* Enable RX checksum offload only if requested */
1745                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1746                         cfg |= GEM_BIT(RXCOEN);
1747         }
1748 
1749         if (dev->flags & IFF_ALLMULTI) {
1750                 /* Enable all multicast mode */
1751                 macb_or_gem_writel(bp, HRB, -1);
1752                 macb_or_gem_writel(bp, HRT, -1);
1753                 cfg |= MACB_BIT(NCFGR_MTI);
1754         } else if (!netdev_mc_empty(dev)) {
1755                 /* Enable specific multicasts */
1756                 macb_sethashtable(dev);
1757                 cfg |= MACB_BIT(NCFGR_MTI);
1758         } else if (dev->flags & (~IFF_ALLMULTI)) {
1759                 /* Disable all multicast mode */
1760                 macb_or_gem_writel(bp, HRB, 0);
1761                 macb_or_gem_writel(bp, HRT, 0);
1762                 cfg &= ~MACB_BIT(NCFGR_MTI);
1763         }
1764 
1765         macb_writel(bp, NCFGR, cfg);
1766 }
1767 EXPORT_SYMBOL_GPL(macb_set_rx_mode);
1768 
1769 static int macb_open(struct net_device *dev)
1770 {
1771         struct macb *bp = netdev_priv(dev);
1772         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1773         int err;
1774 
1775         netdev_dbg(bp->dev, "open\n");
1776 
1777         /* carrier starts down */
1778         netif_carrier_off(dev);
1779 
1780         /* if the PHY is not yet registered, retry later */
1781         if (!bp->phy_dev)
1782                 return -EAGAIN;
1783 
1784         /* RX buffers initialization */
1785         macb_init_rx_buffer_size(bp, bufsz);
1786 
1787         err = macb_alloc_consistent(bp);
1788         if (err) {
1789                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1790                            err);
1791                 return err;
1792         }
1793 
1794         napi_enable(&bp->napi);
1795 
1796         bp->macbgem_ops.mog_init_rings(bp);
1797         macb_init_hw(bp);
1798 
1799         /* schedule a link state check */
1800         phy_start(bp->phy_dev);
1801 
1802         netif_tx_start_all_queues(dev);
1803 
1804         return 0;
1805 }
1806 
1807 static int macb_close(struct net_device *dev)
1808 {
1809         struct macb *bp = netdev_priv(dev);
1810         unsigned long flags;
1811 
1812         netif_tx_stop_all_queues(dev);
1813         napi_disable(&bp->napi);
1814 
1815         if (bp->phy_dev)
1816                 phy_stop(bp->phy_dev);
1817 
1818         spin_lock_irqsave(&bp->lock, flags);
1819         macb_reset_hw(bp);
1820         netif_carrier_off(dev);
1821         spin_unlock_irqrestore(&bp->lock, flags);
1822 
1823         macb_free_consistent(bp);
1824 
1825         return 0;
1826 }
1827 
1828 static void gem_update_stats(struct macb *bp)
1829 {
1830         int i;
1831         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1832 
1833         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1834                 u32 offset = gem_statistics[i].offset;
1835                 u64 val = __raw_readl(bp->regs + offset);
1836 
1837                 bp->ethtool_stats[i] += val;
1838                 *p += val;
1839 
1840                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1841                         /* Add GEM_OCTTXH, GEM_OCTRXH */
1842                         val = __raw_readl(bp->regs + offset + 4);
1843                         bp->ethtool_stats[i] += ((u64)val) << 32;
1844                         *(++p) += val;
1845                 }
1846         }
1847 }
1848 
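     /*
      * Editor's note: the GEM octet counters are wider than 32 bits and
      * span two consecutive registers (the accumulator starts at
      * tx_octets_31_0 above, with a 47:32 half expected to follow it in
      * struct gem_stats), which is why the loop does an extra read at
      * offset + 4 for GEM_OCTTXL/GEM_OCTRXL and advances p past the
      * high-half field.
      */
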
1849 static struct net_device_stats *gem_get_stats(struct macb *bp)
1850 {
1851         struct gem_stats *hwstat = &bp->hw_stats.gem;
1852         struct net_device_stats *nstat = &bp->stats;
1853 
1854         gem_update_stats(bp);
1855 
1856         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1857                             hwstat->rx_alignment_errors +
1858                             hwstat->rx_resource_errors +
1859                             hwstat->rx_overruns +
1860                             hwstat->rx_oversize_frames +
1861                             hwstat->rx_jabbers +
1862                             hwstat->rx_undersized_frames +
1863                             hwstat->rx_length_field_frame_errors);
1864         nstat->tx_errors = (hwstat->tx_late_collisions +
1865                             hwstat->tx_excessive_collisions +
1866                             hwstat->tx_underrun +
1867                             hwstat->tx_carrier_sense_errors);
1868         nstat->multicast = hwstat->rx_multicast_frames;
1869         nstat->collisions = (hwstat->tx_single_collision_frames +
1870                              hwstat->tx_multiple_collision_frames +
1871                              hwstat->tx_excessive_collisions);
1872         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
1873                                    hwstat->rx_jabbers +
1874                                    hwstat->rx_undersized_frames +
1875                                    hwstat->rx_length_field_frame_errors);
1876         nstat->rx_over_errors = hwstat->rx_resource_errors;
1877         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
1878         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
1879         nstat->rx_fifo_errors = hwstat->rx_overruns;
1880         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
1881         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
1882         nstat->tx_fifo_errors = hwstat->tx_underrun;
1883 
1884         return nstat;
1885 }
1886 
1887 static void gem_get_ethtool_stats(struct net_device *dev,
1888                                   struct ethtool_stats *stats, u64 *data)
1889 {
1890         struct macb *bp;
1891 
1892         bp = netdev_priv(dev);
1893         gem_update_stats(bp);
1894         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
1895 }
1896 
1897 static int gem_get_sset_count(struct net_device *dev, int sset)
1898 {
1899         switch (sset) {
1900         case ETH_SS_STATS:
1901                 return GEM_STATS_LEN;
1902         default:
1903                 return -EOPNOTSUPP;
1904         }
1905 }
1906 
1907 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1908 {
1909         int i;
1910 
1911         switch (sset) {
1912         case ETH_SS_STATS:
1913                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
1914                         memcpy(p, gem_statistics[i].stat_string,
1915                                ETH_GSTRING_LEN);
1916                 break;
1917         }
1918 }
1919 
1920 struct net_device_stats *macb_get_stats(struct net_device *dev)
1921 {
1922         struct macb *bp = netdev_priv(dev);
1923         struct net_device_stats *nstat = &bp->stats;
1924         struct macb_stats *hwstat = &bp->hw_stats.macb;
1925 
1926         if (macb_is_gem(bp))
1927                 return gem_get_stats(bp);
1928 
1929         /* read stats from hardware */
1930         macb_update_stats(bp);
1931 
1932         /* Convert HW stats into netdevice stats */
1933         nstat->rx_errors = (hwstat->rx_fcs_errors +
1934                             hwstat->rx_align_errors +
1935                             hwstat->rx_resource_errors +
1936                             hwstat->rx_overruns +
1937                             hwstat->rx_oversize_pkts +
1938                             hwstat->rx_jabbers +
1939                             hwstat->rx_undersize_pkts +
1940                             hwstat->sqe_test_errors +
1941                             hwstat->rx_length_mismatch);
1942         nstat->tx_errors = (hwstat->tx_late_cols +
1943                             hwstat->tx_excessive_cols +
1944                             hwstat->tx_underruns +
1945                             hwstat->tx_carrier_errors);
1946         nstat->collisions = (hwstat->tx_single_cols +
1947                              hwstat->tx_multiple_cols +
1948                              hwstat->tx_excessive_cols);
1949         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1950                                    hwstat->rx_jabbers +
1951                                    hwstat->rx_undersize_pkts +
1952                                    hwstat->rx_length_mismatch);
1953         nstat->rx_over_errors = hwstat->rx_resource_errors +
1954                                    hwstat->rx_overruns;
1955         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
1956         nstat->rx_frame_errors = hwstat->rx_align_errors;
1957         nstat->rx_fifo_errors = hwstat->rx_overruns;
1958         /* XXX: What does "missed" mean? */
1959         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
1960         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
1961         nstat->tx_fifo_errors = hwstat->tx_underruns;
1962         /* Don't know about heartbeat or window errors... */
1963 
1964         return nstat;
1965 }
1966 EXPORT_SYMBOL_GPL(macb_get_stats);
1967 
1968 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1969 {
1970         struct macb *bp = netdev_priv(dev);
1971         struct phy_device *phydev = bp->phy_dev;
1972 
1973         if (!phydev)
1974                 return -ENODEV;
1975 
1976         return phy_ethtool_gset(phydev, cmd);
1977 }
1978 
1979 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1980 {
1981         struct macb *bp = netdev_priv(dev);
1982         struct phy_device *phydev = bp->phy_dev;
1983 
1984         if (!phydev)
1985                 return -ENODEV;
1986 
1987         return phy_ethtool_sset(phydev, cmd);
1988 }
1989 
1990 static int macb_get_regs_len(struct net_device *netdev)
1991 {
1992         return MACB_GREGS_NBR * sizeof(u32);
1993 }
1994 
1995 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1996                           void *p)
1997 {
1998         struct macb *bp = netdev_priv(dev);
1999         unsigned int tail, head;
2000         u32 *regs_buff = p;
2001 
2002         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2003                         | MACB_GREGS_VERSION;
2004 
2005         tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2006         head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2007 
2008         regs_buff[0]  = macb_readl(bp, NCR);
2009         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2010         regs_buff[2]  = macb_readl(bp, NSR);
2011         regs_buff[3]  = macb_readl(bp, TSR);
2012         regs_buff[4]  = macb_readl(bp, RBQP);
2013         regs_buff[5]  = macb_readl(bp, TBQP);
2014         regs_buff[6]  = macb_readl(bp, RSR);
2015         regs_buff[7]  = macb_readl(bp, IMR);
2016 
2017         regs_buff[8]  = tail;
2018         regs_buff[9]  = head;
2019         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2020         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2021 
2022         if (macb_is_gem(bp)) {
2023                 regs_buff[12] = gem_readl(bp, USRIO);
2024                 regs_buff[13] = gem_readl(bp, DMACFG);
2025         }
2026 }
2027 
2028 const struct ethtool_ops macb_ethtool_ops = {
2029         .get_settings           = macb_get_settings,
2030         .set_settings           = macb_set_settings,
2031         .get_regs_len           = macb_get_regs_len,
2032         .get_regs               = macb_get_regs,
2033         .get_link               = ethtool_op_get_link,
2034         .get_ts_info            = ethtool_op_get_ts_info,
2035 };
2036 EXPORT_SYMBOL_GPL(macb_ethtool_ops);
2037 
2038 static const struct ethtool_ops gem_ethtool_ops = {
2039         .get_settings           = macb_get_settings,
2040         .set_settings           = macb_set_settings,
2041         .get_regs_len           = macb_get_regs_len,
2042         .get_regs               = macb_get_regs,
2043         .get_link               = ethtool_op_get_link,
2044         .get_ts_info            = ethtool_op_get_ts_info,
2045         .get_ethtool_stats      = gem_get_ethtool_stats,
2046         .get_strings            = gem_get_ethtool_strings,
2047         .get_sset_count         = gem_get_sset_count,
2048 };
2049 
2050 int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2051 {
2052         struct macb *bp = netdev_priv(dev);
2053         struct phy_device *phydev = bp->phy_dev;
2054 
2055         if (!netif_running(dev))
2056                 return -EINVAL;
2057 
2058         if (!phydev)
2059                 return -ENODEV;
2060 
2061         return phy_mii_ioctl(phydev, rq, cmd);
2062 }
2063 EXPORT_SYMBOL_GPL(macb_ioctl);
2064 
2065 static int macb_set_features(struct net_device *netdev,
2066                              netdev_features_t features)
2067 {
2068         struct macb *bp = netdev_priv(netdev);
2069         netdev_features_t changed = features ^ netdev->features;
2070 
2071         /* TX checksum offload */
2072         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2073                 u32 dmacfg;
2074 
2075                 dmacfg = gem_readl(bp, DMACFG);
2076                 if (features & NETIF_F_HW_CSUM)
2077                         dmacfg |= GEM_BIT(TXCOEN);
2078                 else
2079                         dmacfg &= ~GEM_BIT(TXCOEN);
2080                 gem_writel(bp, DMACFG, dmacfg);
2081         }
2082 
2083         /* RX checksum offload */
2084         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2085                 u32 netcfg;
2086 
2087                 netcfg = gem_readl(bp, NCFGR);
2088                 if (features & NETIF_F_RXCSUM &&
2089                     !(netdev->flags & IFF_PROMISC))
2090                         netcfg |= GEM_BIT(RXCOEN);
2091                 else
2092                         netcfg &= ~GEM_BIT(RXCOEN);
2093                 gem_writel(bp, NCFGR, netcfg);
2094         }
2095 
2096         return 0;
2097 }
2098 
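     /*
      * Editor's note, a usage sketch (eth0 is a placeholder name): these
      * two branches are reached from user space through the netdev core,
      * roughly via
      *
      *   ethtool -K eth0 tx off    # toggles NETIF_F_HW_CSUM -> TXCOEN
      *   ethtool -K eth0 rx off    # toggles NETIF_F_RXCSUM  -> RXCOEN
      */
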
2099 static const struct net_device_ops macb_netdev_ops = {
2100         .ndo_open               = macb_open,
2101         .ndo_stop               = macb_close,
2102         .ndo_start_xmit         = macb_start_xmit,
2103         .ndo_set_rx_mode        = macb_set_rx_mode,
2104         .ndo_get_stats          = macb_get_stats,
2105         .ndo_do_ioctl           = macb_ioctl,
2106         .ndo_validate_addr      = eth_validate_addr,
2107         .ndo_change_mtu         = eth_change_mtu,
2108         .ndo_set_mac_address    = eth_mac_addr,
2109 #ifdef CONFIG_NET_POLL_CONTROLLER
2110         .ndo_poll_controller    = macb_poll_controller,
2111 #endif
2112         .ndo_set_features       = macb_set_features,
2113 };
2114 
2115 #if defined(CONFIG_OF)
2116 static const struct macb_config pc302gem_config = {
2117         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2118         .dma_burst_length = 16,
2119 };
2120 
2121 static const struct macb_config sama5d3_config = {
2122         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2123         .dma_burst_length = 16,
2124 };
2125 
2126 static const struct macb_config sama5d4_config = {
2127         .caps = 0,
2128         .dma_burst_length = 4,
2129 };
2130 
2131 static const struct of_device_id macb_dt_ids[] = {
2132         { .compatible = "cdns,at32ap7000-macb" },
2133         { .compatible = "cdns,at91sam9260-macb" },
2134         { .compatible = "cdns,macb" },
2135         { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2136         { .compatible = "cdns,gem", .data = &pc302gem_config },
2137         { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2138         { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2139         { /* sentinel */ }
2140 };
2141 MODULE_DEVICE_TABLE(of, macb_dt_ids);
2142 #endif
2143 
2144 /*
2145  * Configure peripheral capabilities according to device tree
2146  * and integration options used
2147  */
2148 static void macb_configure_caps(struct macb *bp)
2149 {
2150         u32 dcfg;
2151         const struct of_device_id *match;
2152         const struct macb_config *config;
2153 
2154         if (bp->pdev->dev.of_node) {
2155                 match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
2156                 if (match && match->data) {
2157                         config = match->data;
2158 
2159                         bp->caps = config->caps;
2160                         /*
2161                          * As we have access to the matching node, configure
2162                          * DMA burst length as well
2163                          */
2164                         bp->dma_burst_length = config->dma_burst_length;
2165                 }
2166         }
2167 
2168         if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
2169                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2170 
2171         if (macb_is_gem(bp)) {
2172                 dcfg = gem_readl(bp, DCFG1);
2173                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2174                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2175                 dcfg = gem_readl(bp, DCFG2);
2176                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2177                         bp->caps |= MACB_CAPS_FIFO_MODE;
2178         }
2179 
2180         netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
2181 }
2182 
2183 static void macb_probe_queues(void __iomem *mem,
2184                               unsigned int *queue_mask,
2185                               unsigned int *num_queues)
2186 {
2187         unsigned int hw_q;
2188         u32 mid;
2189 
2190         *queue_mask = 0x1;
2191         *num_queues = 1;
2192 
2193         /* is it macb or gem? */
2194         mid = __raw_readl(mem + MACB_MID);
2195         if (MACB_BFEXT(IDNUM, mid) != 0x2)
2196                 return;
2197 
2198         /* bit 0 is never set but queue 0 always exists */
2199         *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
2200         *queue_mask |= 0x1;
2201 
2202         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2203                 if (*queue_mask & (1 << hw_q))
2204                         (*num_queues)++;
2205 }
2206 
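     /*
      * Editor's note, a worked example (my arithmetic): if DCFG6 reads
      * 0x04, the code above yields queue_mask = 0x05 and num_queues = 2.
      * The probe loop further down then maps Linux queue 1 onto hardware
      * queue 2 (GEM_ISR(1), GEM_TBQP(1), ...) and, per the comment there,
      * expects its IRQ at platform index 1, with the gap left by absent
      * hardware queue 1 removed from the device tree.
      */
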
2207 static int macb_probe(struct platform_device *pdev)
2208 {
2209         struct macb_platform_data *pdata;
2210         struct resource *regs;
2211         struct net_device *dev;
2212         struct macb *bp;
2213         struct macb_queue *queue;
2214         struct phy_device *phydev;
2215         u32 config;
2216         int err = -ENXIO;
2217         const char *mac;
2218         void __iomem *mem;
2219         unsigned int hw_q, queue_mask, q, num_queues;
2220         struct clk *pclk, *hclk, *tx_clk;
2221 
2222         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2223         if (!regs) {
2224                 dev_err(&pdev->dev, "no mmio resource defined\n");
2225                 goto err_out;
2226         }
2227 
2228         pclk = devm_clk_get(&pdev->dev, "pclk");
2229         if (IS_ERR(pclk)) {
2230                 err = PTR_ERR(pclk);
2231                 dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
2232                 goto err_out;
2233         }
2234 
2235         hclk = devm_clk_get(&pdev->dev, "hclk");
2236         if (IS_ERR(hclk)) {
2237                 err = PTR_ERR(hclk);
2238                 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2239                 goto err_out;
2240         }
2241 
2242         tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2243 
2244         err = clk_prepare_enable(pclk);
2245         if (err) {
2246                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2247                 goto err_out;
2248         }
2249 
2250         err = clk_prepare_enable(hclk);
2251         if (err) {
2252                 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2253                 goto err_out_disable_pclk;
2254         }
2255 
2256         if (!IS_ERR(tx_clk)) {
2257                 err = clk_prepare_enable(tx_clk);
2258                 if (err) {
2259                         dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n",
2260                                 err);
2261                         goto err_out_disable_hclk;
2262                 }
2263         }
2264 
2265         err = -ENOMEM;
2266         mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
2267         if (!mem) {
2268                 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
2269                 goto err_out_disable_clocks;
2270         }
2271 
2272         macb_probe_queues(mem, &queue_mask, &num_queues);
2273         dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2274         if (!dev)
2275                 goto err_out_disable_clocks;
2276 
2277         SET_NETDEV_DEV(dev, &pdev->dev);
2278 
2279         bp = netdev_priv(dev);
2280         bp->pdev = pdev;
2281         bp->dev = dev;
2282         bp->regs = mem;
2283         bp->num_queues = num_queues;
2284         bp->pclk = pclk;
2285         bp->hclk = hclk;
2286         bp->tx_clk = tx_clk;
2287 
2288         spin_lock_init(&bp->lock);
2289 
2290         /* set up the queue register mapping once and for all: queue0 has a
2291          * special register mapping, but we don't want to test the queue index
2292          * and then compute the corresponding register offset at run time.
2293          */
2294         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2295                 if (!(queue_mask & (1 << hw_q)))
2296                         continue;
2297 
2298                 queue = &bp->queues[q];
2299                 queue->bp = bp;
2300                 if (hw_q) {
2301                         queue->ISR  = GEM_ISR(hw_q - 1);
2302                         queue->IER  = GEM_IER(hw_q - 1);
2303                         queue->IDR  = GEM_IDR(hw_q - 1);
2304                         queue->IMR  = GEM_IMR(hw_q - 1);
2305                         queue->TBQP = GEM_TBQP(hw_q - 1);
2306                 } else {
2307                         /* queue0 uses legacy registers */
2308                         queue->ISR  = MACB_ISR;
2309                         queue->IER  = MACB_IER;
2310                         queue->IDR  = MACB_IDR;
2311                         queue->IMR  = MACB_IMR;
2312                         queue->TBQP = MACB_TBQP;
2313                 }
2314 
2315                 /* get irq: here we use the linux queue index, not the hardware
2316                  * queue index. the queue irq definitions in the device tree
2317                  * must therefore omit the optional gaps that could exist in the
2318                  * hardware queue mask.
2319                  */
2320                 queue->irq = platform_get_irq(pdev, q);
2321                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2322                                        0, dev->name, queue);
2323                 if (err) {
2324                         dev_err(&pdev->dev,
2325                                 "Unable to request IRQ %d (error %d)\n",
2326                                 queue->irq, err);
2327                         goto err_out_free_netdev;
2328                 }
2329 
2330                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2331                 q++;
2332         }
2333         dev->irq = bp->queues[0].irq;
2334 
2335         dev->netdev_ops = &macb_netdev_ops;
2336         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2337 
2338         dev->base_addr = regs->start;
2339 
2340         /* set up capabilities */
2341         macb_configure_caps(bp);
2342 
2343         /* set up appropriate routines according to adapter type */
2344         if (macb_is_gem(bp)) {
2345                 bp->max_tx_length = GEM_MAX_TX_LEN;
2346                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2347                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2348                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2349                 bp->macbgem_ops.mog_rx = gem_rx;
2350                 dev->ethtool_ops = &gem_ethtool_ops;
2351         } else {
2352                 bp->max_tx_length = MACB_MAX_TX_LEN;
2353                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2354                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2355                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2356                 bp->macbgem_ops.mog_rx = macb_rx;
2357                 dev->ethtool_ops = &macb_ethtool_ops;
2358         }
2359 
2360         /* Set features */
2361         dev->hw_features = NETIF_F_SG;
2362         /* Checksum offload is only available on gem with packet buffer */
2363         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2364                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2365         if (bp->caps & MACB_CAPS_SG_DISABLED)
2366                 dev->hw_features &= ~NETIF_F_SG;
2367         dev->features = dev->hw_features;
2368 
2369         /* Set MII management clock divider */
2370         config = macb_mdc_clk_div(bp);
2371         config |= macb_dbw(bp);
2372         macb_writel(bp, NCFGR, config);
2373 
2374         mac = of_get_mac_address(pdev->dev.of_node);
2375         if (mac)
2376                 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
2377         else
2378                 macb_get_hwaddr(bp);
2379 
2380         err = of_get_phy_mode(pdev->dev.of_node);
2381         if (err < 0) {
2382                 pdata = dev_get_platdata(&pdev->dev);
2383                 if (pdata && pdata->is_rmii)
2384                         bp->phy_interface = PHY_INTERFACE_MODE_RMII;
2385                 else
2386                         bp->phy_interface = PHY_INTERFACE_MODE_MII;
2387         } else {
2388                 bp->phy_interface = err;
2389         }
2390 
2391         if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2392                 macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
2393         else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2394 #if defined(CONFIG_ARCH_AT91)
2395                 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
2396                                                MACB_BIT(CLKEN)));
2397 #else
2398                 macb_or_gem_writel(bp, USRIO, 0);
2399 #endif
2400         else
2401 #if defined(CONFIG_ARCH_AT91)
2402                 macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
2403 #else
2404                 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
2405 #endif
2406 
2407         err = register_netdev(dev);
2408         if (err) {
2409                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2410                 goto err_out_free_netdev;
2411         }
2412 
2413         err = macb_mii_init(bp);
2414         if (err)
2415                 goto err_out_unregister_netdev;
2416 
2417         platform_set_drvdata(pdev, dev);
2418 
2419         netif_carrier_off(dev);
2420 
2421         netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
2422                     macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
2423                     dev->base_addr, dev->irq, dev->dev_addr);
2424 
2425         phydev = bp->phy_dev;
2426         netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
2427                     phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
2428 
2429         return 0;
2430 
2431 err_out_unregister_netdev:
2432         unregister_netdev(dev);
2433 err_out_free_netdev:
2434         free_netdev(dev);
2435 err_out_disable_clocks:
2436         if (!IS_ERR(tx_clk))
2437                 clk_disable_unprepare(tx_clk);
2438 err_out_disable_hclk:
2439         clk_disable_unprepare(hclk);
2440 err_out_disable_pclk:
2441         clk_disable_unprepare(pclk);
2442 err_out:
2443         return err;
2444 }
2445 
2446 static int macb_remove(struct platform_device *pdev)
2447 {
2448         struct net_device *dev;
2449         struct macb *bp;
2450 
2451         dev = platform_get_drvdata(pdev);
2452 
2453         if (dev) {
2454                 bp = netdev_priv(dev);
2455                 if (bp->phy_dev)
2456                         phy_disconnect(bp->phy_dev);
2457                 mdiobus_unregister(bp->mii_bus);
2458                 kfree(bp->mii_bus->irq);
2459                 mdiobus_free(bp->mii_bus);
2460                 unregister_netdev(dev);
2461                 if (!IS_ERR(bp->tx_clk))
2462                         clk_disable_unprepare(bp->tx_clk);
2463                 clk_disable_unprepare(bp->hclk);
2464                 clk_disable_unprepare(bp->pclk);
2465                 free_netdev(dev);
2466         }
2467 
2468         return 0;
2469 }
2470 
2471 static int __maybe_unused macb_suspend(struct device *dev)
2472 {
2473         struct platform_device *pdev = to_platform_device(dev);
2474         struct net_device *netdev = platform_get_drvdata(pdev);
2475         struct macb *bp = netdev_priv(netdev);
2476 
2477         netif_carrier_off(netdev);
2478         netif_device_detach(netdev);
2479 
2480         if (!IS_ERR(bp->tx_clk))
2481                 clk_disable_unprepare(bp->tx_clk);
2482         clk_disable_unprepare(bp->hclk);
2483         clk_disable_unprepare(bp->pclk);
2484 
2485         return 0;
2486 }
2487 
2488 static int __maybe_unused macb_resume(struct device *dev)
2489 {
2490         struct platform_device *pdev = to_platform_device(dev);
2491         struct net_device *netdev = platform_get_drvdata(pdev);
2492         struct macb *bp = netdev_priv(netdev);
2493 
2494         clk_prepare_enable(bp->pclk);
2495         clk_prepare_enable(bp->hclk);
2496         if (!IS_ERR(bp->tx_clk))
2497                 clk_prepare_enable(bp->tx_clk);
2498 
2499         netif_device_attach(netdev);
2500 
2501         return 0;
2502 }
2503 
2504 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
2505 
2506 static struct platform_driver macb_driver = {
2507         .probe          = macb_probe,
2508         .remove         = macb_remove,
2509         .driver         = {
2510                 .name           = "macb",
2511                 .of_match_table = of_match_ptr(macb_dt_ids),
2512                 .pm     = &macb_pm_ops,
2513         },
2514 };
2515 
2516 module_platform_driver(macb_driver);
2517 
2518 MODULE_LICENSE("GPL");
2519 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
2520 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2521 MODULE_ALIAS("platform:macb");
2522 
