
Linux/drivers/net/ethernet/cadence/macb.c

  1 /*
  2  * Cadence MACB/GEM Ethernet Controller driver
  3  *
  4  * Copyright (C) 2004-2006 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 #include <linux/clk.h>
 13 #include <linux/module.h>
 14 #include <linux/moduleparam.h>
 15 #include <linux/kernel.h>
 16 #include <linux/types.h>
 17 #include <linux/circ_buf.h>
 18 #include <linux/slab.h>
 19 #include <linux/init.h>
 20 #include <linux/io.h>
 21 #include <linux/gpio.h>
 22 #include <linux/gpio/consumer.h>
 23 #include <linux/interrupt.h>
 24 #include <linux/netdevice.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/dma-mapping.h>
 27 #include <linux/platform_data/macb.h>
 28 #include <linux/platform_device.h>
 29 #include <linux/phy.h>
 30 #include <linux/of.h>
 31 #include <linux/of_device.h>
 32 #include <linux/of_gpio.h>
 33 #include <linux/of_mdio.h>
 34 #include <linux/of_net.h>
 35 
 36 #include "macb.h"
 37 
 38 #define MACB_RX_BUFFER_SIZE     128
 39 #define RX_BUFFER_MULTIPLE      64  /* bytes */
 40 #define RX_RING_SIZE            512 /* must be power of 2 */
 41 #define RX_RING_BYTES           (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 42 
 43 #define TX_RING_SIZE            128 /* must be power of 2 */
 44 #define TX_RING_BYTES           (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 45 
 46 /* Level of occupied TX descriptors below which we wake up the TX process */
 47 #define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)
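
     /* With TX_RING_SIZE = 128, for example, this threshold is 96: a
      * stopped subqueue is woken once at most 96 descriptors remain
      * in flight (see macb_tx_interrupt() below).
      */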
 48 
 49 #define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
 50                                  | MACB_BIT(ISR_ROVR))
 51 #define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
 52                                  | MACB_BIT(ISR_RLE)                    \
 53                                  | MACB_BIT(TXERR))
 54 #define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 55 
 56 #define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 57 #define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
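
     /* With the field widths defined in macb.h (MACB_TX_FRMLEN_SIZE = 11,
      * GEM_TX_FRMLEN_SIZE = 14, assuming the values current at this
      * writing), these work out to 2047 and 16383 bytes respectively.
      */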
 58 
 59 #define GEM_MTU_MIN_SIZE        68
 60 
 61 #define MACB_WOL_HAS_MAGIC_PACKET       (0x1 << 0)
 62 #define MACB_WOL_ENABLED                (0x1 << 1)
 63 
 64 /* Graceful stop timeouts in us. We should allow up to
 65  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 66  */
 67 #define MACB_HALT_TIMEOUT       1230
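
     /* Presumably derived as one maximum-size frame on the wire: a
      * 1518-byte frame plus 8 bytes of preamble/SFD plus 12 bytes of IFG
      * is 1538 bytes = 12304 bits, and 12304 bits / 10 Mbit/s ~= 1230 us.
      */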
 68 
 69 /* Ring buffer accessors */
 70 static unsigned int macb_tx_ring_wrap(unsigned int index)
 71 {
 72         return index & (TX_RING_SIZE - 1);
 73 }
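
     /* Because TX_RING_SIZE is a power of 2, a free-running index can be
      * reduced with a mask instead of a modulo, e.g. with TX_RING_SIZE = 128:
      *   macb_tx_ring_wrap(130) == (130 & 127) == 2
      */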
 74 
 75 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 76                                           unsigned int index)
 77 {
 78         return &queue->tx_ring[macb_tx_ring_wrap(index)];
 79 }
 80 
 81 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 82                                        unsigned int index)
 83 {
 84         return &queue->tx_skb[macb_tx_ring_wrap(index)];
 85 }
 86 
 87 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 88 {
 89         dma_addr_t offset;
 90 
 91         offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 92 
 93         return queue->tx_ring_dma + offset;
 94 }
 95 
 96 static unsigned int macb_rx_ring_wrap(unsigned int index)
 97 {
 98         return index & (RX_RING_SIZE - 1);
 99 }
100 
101 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
102 {
103         return &bp->rx_ring[macb_rx_ring_wrap(index)];
104 }
105 
106 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
107 {
108         return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
109 }
110 
111 /* I/O accessors */
112 static u32 hw_readl_native(struct macb *bp, int offset)
113 {
114         return __raw_readl(bp->regs + offset);
115 }
116 
117 static void hw_writel_native(struct macb *bp, int offset, u32 value)
118 {
119         __raw_writel(value, bp->regs + offset);
120 }
121 
122 static u32 hw_readl(struct macb *bp, int offset)
123 {
124         return readl_relaxed(bp->regs + offset);
125 }
126 
127 static void hw_writel(struct macb *bp, int offset, u32 value)
128 {
129         writel_relaxed(value, bp->regs + offset);
130 }
131 
 132 /* Find the CPU endianness by using the loopback bit of the NCR register.
 133  * When the CPU is big endian, we need to program swapped mode for
 134  * management descriptor access.
135  */
136 static bool hw_is_native_io(void __iomem *addr)
137 {
138         u32 value = MACB_BIT(LLB);
139 
140         __raw_writel(value, addr + MACB_NCR);
141         value = __raw_readl(addr + MACB_NCR);
142 
143         /* Write 0 back to disable everything */
144         __raw_writel(0, addr + MACB_NCR);
145 
146         return value == MACB_BIT(LLB);
147 }
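
     /* A sketch of the probe above, assuming MACB_BIT(LLB) == 0x00000002:
      * the __raw_*() accessors never byte-swap, so when CPU and peripheral
      * endianness agree the value reads back as written, while on a
      * big-endian CPU it reads back byte-swapped as 0x02000000 and this
      * function returns false, selecting swapped descriptor access.
      */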
148 
149 static bool hw_is_gem(void __iomem *addr, bool native_io)
150 {
151         u32 id;
152 
153         if (native_io)
154                 id = __raw_readl(addr + MACB_MID);
155         else
156                 id = readl_relaxed(addr + MACB_MID);
157 
158         return MACB_BFEXT(IDNUM, id) >= 0x2;
159 }
160 
161 static void macb_set_hwaddr(struct macb *bp)
162 {
163         u32 bottom;
164         u16 top;
165 
166         bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
167         macb_or_gem_writel(bp, SA1B, bottom);
168         top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
169         macb_or_gem_writel(bp, SA1T, top);
170 
171         /* Clear unused address register sets */
172         macb_or_gem_writel(bp, SA2B, 0);
173         macb_or_gem_writel(bp, SA2T, 0);
174         macb_or_gem_writel(bp, SA3B, 0);
175         macb_or_gem_writel(bp, SA3T, 0);
176         macb_or_gem_writel(bp, SA4B, 0);
177         macb_or_gem_writel(bp, SA4T, 0);
178 }
179 
180 static void macb_get_hwaddr(struct macb *bp)
181 {
182         struct macb_platform_data *pdata;
183         u32 bottom;
184         u16 top;
185         u8 addr[6];
186         int i;
187 
188         pdata = dev_get_platdata(&bp->pdev->dev);
189 
 190         /* Check all 4 address registers for a valid address */
191         for (i = 0; i < 4; i++) {
192                 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
193                 top = macb_or_gem_readl(bp, SA1T + i * 8);
194 
195                 if (pdata && pdata->rev_eth_addr) {
196                         addr[5] = bottom & 0xff;
197                         addr[4] = (bottom >> 8) & 0xff;
198                         addr[3] = (bottom >> 16) & 0xff;
199                         addr[2] = (bottom >> 24) & 0xff;
200                         addr[1] = top & 0xff;
201                         addr[0] = (top & 0xff00) >> 8;
202                 } else {
203                         addr[0] = bottom & 0xff;
204                         addr[1] = (bottom >> 8) & 0xff;
205                         addr[2] = (bottom >> 16) & 0xff;
206                         addr[3] = (bottom >> 24) & 0xff;
207                         addr[4] = top & 0xff;
208                         addr[5] = (top >> 8) & 0xff;
209                 }
210 
211                 if (is_valid_ether_addr(addr)) {
212                         memcpy(bp->dev->dev_addr, addr, sizeof(addr));
213                         return;
214                 }
215         }
216 
217         dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
218         eth_hw_addr_random(bp->dev);
219 }
220 
221 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
222 {
223         struct macb *bp = bus->priv;
224         int value;
225 
226         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
227                               | MACB_BF(RW, MACB_MAN_READ)
228                               | MACB_BF(PHYA, mii_id)
229                               | MACB_BF(REGA, regnum)
230                               | MACB_BF(CODE, MACB_MAN_CODE)));
231 
232         /* wait for end of transfer */
233         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
234                 cpu_relax();
235 
236         value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
237 
238         return value;
239 }
240 
241 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
242                            u16 value)
243 {
244         struct macb *bp = bus->priv;
245 
246         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
247                               | MACB_BF(RW, MACB_MAN_WRITE)
248                               | MACB_BF(PHYA, mii_id)
249                               | MACB_BF(REGA, regnum)
250                               | MACB_BF(CODE, MACB_MAN_CODE)
251                               | MACB_BF(DATA, value)));
252 
253         /* wait for end of transfer */
254         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
255                 cpu_relax();
256 
257         return 0;
258 }
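
     /* The MAN register carries an IEEE 802.3 Clause 22 management frame.
      * As a sketch (field values from macb.h), a read of MII_BMSR on the
      * PHY at address 1 would be composed as:
      *
      *   MACB_BF(SOF, MACB_MAN_SOF)    start-of-frame bits 01
      *   MACB_BF(RW, MACB_MAN_READ)    opcode 10 (01 for a write)
      *   MACB_BF(PHYA, 1)              5-bit PHY address
      *   MACB_BF(REGA, MII_BMSR)       5-bit register address
      *   MACB_BF(CODE, MACB_MAN_CODE)  turnaround bits 10
      *
      * with the 16-bit result extracted via MACB_BFEXT(DATA, ...) as above.
      */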
259 
260 /**
 261  * macb_set_tx_clk() - Set the TX clock to match a new link speed
 262  * @clk:        Pointer to the clock to change
 263  * @speed:      New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 264  * @dev:        Pointer to the struct net_device
265  */
266 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
267 {
268         long ferr, rate, rate_rounded;
269 
270         if (!clk)
271                 return;
272 
273         switch (speed) {
274         case SPEED_10:
275                 rate = 2500000;
276                 break;
277         case SPEED_100:
278                 rate = 25000000;
279                 break;
280         case SPEED_1000:
281                 rate = 125000000;
282                 break;
283         default:
284                 return;
285         }
286 
287         rate_rounded = clk_round_rate(clk, rate);
288         if (rate_rounded < 0)
289                 return;
290 
291         /* RGMII allows 50 ppm frequency error. Test and warn if this limit
292          * is not satisfied.
293          */
294         ferr = abs(rate_rounded - rate);
295         ferr = DIV_ROUND_UP(ferr, rate / 100000);
296         if (ferr > 5)
297                 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
298                             rate);
299 
300         if (clk_set_rate(clk, rate_rounded))
301                 netdev_err(dev, "adjusting tx_clk failed.\n");
302 }
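
     /* ferr ends up in units of 10 ppm, since rate / 100000 is 1e-5 of the
      * target, so "ferr > 5" implements the 50 ppm limit mentioned above.
      * Worked example: rate = 25000000 and rate_rounded = 24998000 give
      * DIV_ROUND_UP(2000, 250) = 8, i.e. 80 ppm, and trigger the warning.
      */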
303 
304 static void macb_handle_link_change(struct net_device *dev)
305 {
306         struct macb *bp = netdev_priv(dev);
307         struct phy_device *phydev = dev->phydev;
308         unsigned long flags;
309         int status_change = 0;
310 
311         spin_lock_irqsave(&bp->lock, flags);
312 
313         if (phydev->link) {
314                 if ((bp->speed != phydev->speed) ||
315                     (bp->duplex != phydev->duplex)) {
316                         u32 reg;
317 
318                         reg = macb_readl(bp, NCFGR);
319                         reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
320                         if (macb_is_gem(bp))
321                                 reg &= ~GEM_BIT(GBE);
322 
323                         if (phydev->duplex)
324                                 reg |= MACB_BIT(FD);
325                         if (phydev->speed == SPEED_100)
326                                 reg |= MACB_BIT(SPD);
327                         if (phydev->speed == SPEED_1000 &&
328                             bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
329                                 reg |= GEM_BIT(GBE);
330 
331                         macb_or_gem_writel(bp, NCFGR, reg);
332 
333                         bp->speed = phydev->speed;
334                         bp->duplex = phydev->duplex;
335                         status_change = 1;
336                 }
337         }
338 
339         if (phydev->link != bp->link) {
340                 if (!phydev->link) {
341                         bp->speed = 0;
342                         bp->duplex = -1;
343                 }
344                 bp->link = phydev->link;
345 
346                 status_change = 1;
347         }
348 
349         spin_unlock_irqrestore(&bp->lock, flags);
350 
351         if (status_change) {
352                 if (phydev->link) {
353                         /* Update the TX clock rate if and only if the link is
354                          * up and there has been a link change.
355                          */
356                         macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
357 
358                         netif_carrier_on(dev);
359                         netdev_info(dev, "link up (%d/%s)\n",
360                                     phydev->speed,
361                                     phydev->duplex == DUPLEX_FULL ?
362                                     "Full" : "Half");
363                 } else {
364                         netif_carrier_off(dev);
365                         netdev_info(dev, "link down\n");
366                 }
367         }
368 }
369 
 370 /* Based on au1000_eth.c */
371 static int macb_mii_probe(struct net_device *dev)
372 {
373         struct macb *bp = netdev_priv(dev);
374         struct macb_platform_data *pdata;
375         struct phy_device *phydev;
376         int phy_irq;
377         int ret;
378 
379         phydev = phy_find_first(bp->mii_bus);
380         if (!phydev) {
381                 netdev_err(dev, "no PHY found\n");
382                 return -ENXIO;
383         }
384 
385         pdata = dev_get_platdata(&bp->pdev->dev);
386         if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
387                 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
388                                         "phy int");
389                 if (!ret) {
390                         phy_irq = gpio_to_irq(pdata->phy_irq_pin);
391                         phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
392                 }
393         }
394 
395         /* attach the mac to the phy */
396         ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
397                                  bp->phy_interface);
398         if (ret) {
399                 netdev_err(dev, "Could not attach to PHY\n");
400                 return ret;
401         }
402 
403         /* mask with MAC supported features */
404         if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
405                 phydev->supported &= PHY_GBIT_FEATURES;
406         else
407                 phydev->supported &= PHY_BASIC_FEATURES;
408 
409         if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
410                 phydev->supported &= ~SUPPORTED_1000baseT_Half;
411 
412         phydev->advertising = phydev->supported;
413 
414         bp->link = 0;
415         bp->speed = 0;
416         bp->duplex = -1;
417 
418         return 0;
419 }
420 
421 static int macb_mii_init(struct macb *bp)
422 {
423         struct macb_platform_data *pdata;
424         struct device_node *np;
425         int err = -ENXIO, i;
426 
427         /* Enable management port */
428         macb_writel(bp, NCR, MACB_BIT(MPE));
429 
430         bp->mii_bus = mdiobus_alloc();
431         if (!bp->mii_bus) {
432                 err = -ENOMEM;
433                 goto err_out;
434         }
435 
436         bp->mii_bus->name = "MACB_mii_bus";
437         bp->mii_bus->read = &macb_mdio_read;
438         bp->mii_bus->write = &macb_mdio_write;
439         snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
440                  bp->pdev->name, bp->pdev->id);
441         bp->mii_bus->priv = bp;
442         bp->mii_bus->parent = &bp->pdev->dev;
443         pdata = dev_get_platdata(&bp->pdev->dev);
444 
445         dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
446 
447         np = bp->pdev->dev.of_node;
448         if (np) {
449                 /* try dt phy registration */
450                 err = of_mdiobus_register(bp->mii_bus, np);
451 
 452                 /* Fall back to standard PHY registration if no PHY was
 453                  * found during DT phy registration
454                  */
455                 if (!err && !phy_find_first(bp->mii_bus)) {
456                         for (i = 0; i < PHY_MAX_ADDR; i++) {
457                                 struct phy_device *phydev;
458 
459                                 phydev = mdiobus_scan(bp->mii_bus, i);
460                                 if (IS_ERR(phydev) &&
461                                     PTR_ERR(phydev) != -ENODEV) {
462                                         err = PTR_ERR(phydev);
463                                         break;
464                                 }
465                         }
466 
467                         if (err)
468                                 goto err_out_unregister_bus;
469                 }
470         } else {
471                 if (pdata)
472                         bp->mii_bus->phy_mask = pdata->phy_mask;
473 
474                 err = mdiobus_register(bp->mii_bus);
475         }
476 
477         if (err)
478                 goto err_out_free_mdiobus;
479 
480         err = macb_mii_probe(bp->dev);
481         if (err)
482                 goto err_out_unregister_bus;
483 
484         return 0;
485 
486 err_out_unregister_bus:
487         mdiobus_unregister(bp->mii_bus);
488 err_out_free_mdiobus:
489         mdiobus_free(bp->mii_bus);
490 err_out:
491         return err;
492 }
493 
494 static void macb_update_stats(struct macb *bp)
495 {
496         u32 *p = &bp->hw_stats.macb.rx_pause_frames;
497         u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
498         int offset = MACB_PFR;
499 
500         WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
501 
502         for (; p < end; p++, offset += 4)
503                 *p += bp->macb_reg_readl(bp, offset);
504 }
505 
506 static int macb_halt_tx(struct macb *bp)
507 {
508         unsigned long   halt_time, timeout;
509         u32             status;
510 
511         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
512 
513         timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
514         do {
515                 halt_time = jiffies;
516                 status = macb_readl(bp, TSR);
517                 if (!(status & MACB_BIT(TGO)))
518                         return 0;
519 
520                 usleep_range(10, 250);
521         } while (time_before(halt_time, timeout));
522 
523         return -ETIMEDOUT;
524 }
525 
526 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
527 {
528         if (tx_skb->mapping) {
529                 if (tx_skb->mapped_as_page)
530                         dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
531                                        tx_skb->size, DMA_TO_DEVICE);
532                 else
533                         dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
534                                          tx_skb->size, DMA_TO_DEVICE);
535                 tx_skb->mapping = 0;
536         }
537 
538         if (tx_skb->skb) {
539                 dev_kfree_skb_any(tx_skb->skb);
540                 tx_skb->skb = NULL;
541         }
542 }
543 
544 static void macb_tx_error_task(struct work_struct *work)
545 {
546         struct macb_queue       *queue = container_of(work, struct macb_queue,
547                                                       tx_error_task);
548         struct macb             *bp = queue->bp;
549         struct macb_tx_skb      *tx_skb;
550         struct macb_dma_desc    *desc;
551         struct sk_buff          *skb;
552         unsigned int            tail;
553         unsigned long           flags;
554 
555         netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
556                     (unsigned int)(queue - bp->queues),
557                     queue->tx_tail, queue->tx_head);
558 
559         /* Prevent the queue IRQ handlers from running: each of them may call
560          * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
561          * As explained below, we have to halt the transmission before updating
562          * TBQP registers so we call netif_tx_stop_all_queues() to notify the
563          * network engine about the macb/gem being halted.
564          */
565         spin_lock_irqsave(&bp->lock, flags);
566 
567         /* Make sure nobody is trying to queue up new packets */
568         netif_tx_stop_all_queues(bp->dev);
569 
 570         /* Stop transmission now (in case we have just queued new
 571          * packets): the macb/gem must be halted before the TBQP
 572          * register can be rewritten.
573          */
574         if (macb_halt_tx(bp))
 575                 /* Just complain for now; reinitializing the TX path might help */
576                 netdev_err(bp->dev, "BUG: halt tx timed out\n");
577 
 578         /* Walk the frames in the TX queue, including the ones that caused
 579          * the error, and free their transmit buffers to the upper layer.
580          */
581         for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
582                 u32     ctrl;
583 
584                 desc = macb_tx_desc(queue, tail);
585                 ctrl = desc->ctrl;
586                 tx_skb = macb_tx_skb(queue, tail);
587                 skb = tx_skb->skb;
588 
589                 if (ctrl & MACB_BIT(TX_USED)) {
590                         /* skb is set for the last buffer of the frame */
591                         while (!skb) {
592                                 macb_tx_unmap(bp, tx_skb);
593                                 tail++;
594                                 tx_skb = macb_tx_skb(queue, tail);
595                                 skb = tx_skb->skb;
596                         }
597 
598                         /* ctrl still refers to the first buffer descriptor
599                          * since it's the only one written back by the hardware
600                          */
601                         if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
602                                 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
603                                             macb_tx_ring_wrap(tail), skb->data);
604                                 bp->stats.tx_packets++;
605                                 bp->stats.tx_bytes += skb->len;
606                         }
607                 } else {
608                         /* "Buffers exhausted mid-frame" errors may only happen
609                          * if the driver is buggy, so complain loudly about
610                          * those. Statistics are updated by hardware.
611                          */
612                         if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
613                                 netdev_err(bp->dev,
614                                            "BUG: TX buffers exhausted mid-frame\n");
615 
616                         desc->ctrl = ctrl | MACB_BIT(TX_USED);
617                 }
618 
619                 macb_tx_unmap(bp, tx_skb);
620         }
621 
622         /* Set end of TX queue */
623         desc = macb_tx_desc(queue, 0);
624         desc->addr = 0;
625         desc->ctrl = MACB_BIT(TX_USED);
626 
627         /* Make descriptor updates visible to hardware */
628         wmb();
629 
630         /* Reinitialize the TX desc queue */
631         queue_writel(queue, TBQP, queue->tx_ring_dma);
632         /* Make TX ring reflect state of hardware */
633         queue->tx_head = 0;
634         queue->tx_tail = 0;
635 
636         /* Housework before enabling TX IRQ */
637         macb_writel(bp, TSR, macb_readl(bp, TSR));
638         queue_writel(queue, IER, MACB_TX_INT_FLAGS);
639 
640         /* Now we are ready to start transmission again */
641         netif_tx_start_all_queues(bp->dev);
642         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
643 
644         spin_unlock_irqrestore(&bp->lock, flags);
645 }
646 
647 static void macb_tx_interrupt(struct macb_queue *queue)
648 {
649         unsigned int tail;
650         unsigned int head;
651         u32 status;
652         struct macb *bp = queue->bp;
653         u16 queue_index = queue - bp->queues;
654 
655         status = macb_readl(bp, TSR);
656         macb_writel(bp, TSR, status);
657 
658         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
659                 queue_writel(queue, ISR, MACB_BIT(TCOMP));
660 
661         netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
662                     (unsigned long)status);
663 
664         head = queue->tx_head;
665         for (tail = queue->tx_tail; tail != head; tail++) {
666                 struct macb_tx_skb      *tx_skb;
667                 struct sk_buff          *skb;
668                 struct macb_dma_desc    *desc;
669                 u32                     ctrl;
670 
671                 desc = macb_tx_desc(queue, tail);
672 
673                 /* Make hw descriptor updates visible to CPU */
674                 rmb();
675 
676                 ctrl = desc->ctrl;
677 
678                 /* TX_USED bit is only set by hardware on the very first buffer
679                  * descriptor of the transmitted frame.
680                  */
681                 if (!(ctrl & MACB_BIT(TX_USED)))
682                         break;
683 
684                 /* Process all buffers of the current transmitted frame */
685                 for (;; tail++) {
686                         tx_skb = macb_tx_skb(queue, tail);
687                         skb = tx_skb->skb;
688 
689                         /* First, update TX stats if needed */
690                         if (skb) {
691                                 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
692                                             macb_tx_ring_wrap(tail), skb->data);
693                                 bp->stats.tx_packets++;
694                                 bp->stats.tx_bytes += skb->len;
695                         }
696 
697                         /* Now we can safely release resources */
698                         macb_tx_unmap(bp, tx_skb);
699 
700                         /* skb is set only for the last buffer of the frame.
701                          * WARNING: at this point skb has been freed by
702                          * macb_tx_unmap().
703                          */
704                         if (skb)
705                                 break;
706                 }
707         }
708 
709         queue->tx_tail = tail;
710         if (__netif_subqueue_stopped(bp->dev, queue_index) &&
711             CIRC_CNT(queue->tx_head, queue->tx_tail,
712                      TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
713                 netif_wake_subqueue(bp->dev, queue_index);
714 }
715 
716 static void gem_rx_refill(struct macb *bp)
717 {
718         unsigned int            entry;
719         struct sk_buff          *skb;
720         dma_addr_t              paddr;
721 
722         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
723                           RX_RING_SIZE) > 0) {
724                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
725 
726                 /* Make hw descriptor updates visible to CPU */
727                 rmb();
728 
729                 bp->rx_prepared_head++;
730 
731                 if (!bp->rx_skbuff[entry]) {
 732                         /* allocate an sk_buff for this free entry in the ring */
733                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
734                         if (unlikely(!skb)) {
735                                 netdev_err(bp->dev,
736                                            "Unable to allocate sk_buff\n");
737                                 break;
738                         }
739 
740                         /* now fill corresponding descriptor entry */
741                         paddr = dma_map_single(&bp->pdev->dev, skb->data,
742                                                bp->rx_buffer_size,
743                                                DMA_FROM_DEVICE);
744                         if (dma_mapping_error(&bp->pdev->dev, paddr)) {
745                                 dev_kfree_skb(skb);
746                                 break;
747                         }
748 
749                         bp->rx_skbuff[entry] = skb;
750 
751                         if (entry == RX_RING_SIZE - 1)
752                                 paddr |= MACB_BIT(RX_WRAP);
753                         bp->rx_ring[entry].addr = paddr;
754                         bp->rx_ring[entry].ctrl = 0;
755 
756                         /* properly align Ethernet header */
757                         skb_reserve(skb, NET_IP_ALIGN);
758                 } else {
759                         bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
760                         bp->rx_ring[entry].ctrl = 0;
761                 }
762         }
763 
764         /* Make descriptor updates visible to hardware */
765         wmb();
766 
767         netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
768                     bp->rx_prepared_head, bp->rx_tail);
769 }
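
     /* rx_prepared_head is the producer index and rx_tail the consumer.
      * CIRC_SPACE() keeps one slot unused to tell full from empty, e.g.
      * CIRC_SPACE(515, 5, 512) == 1, so at most RX_RING_SIZE - 1 buffers
      * are ever outstanding at once.
      */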
770 
 771 /* Mark DMA descriptors from begin up to, but not including, end as unused */
772 static void discard_partial_frame(struct macb *bp, unsigned int begin,
773                                   unsigned int end)
774 {
775         unsigned int frag;
776 
777         for (frag = begin; frag != end; frag++) {
778                 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
779 
780                 desc->addr &= ~MACB_BIT(RX_USED);
781         }
782 
783         /* Make descriptor updates visible to hardware */
784         wmb();
785 
786         /* When this happens, the hardware stats registers for
 787  * whatever caused this are updated, so we don't have to record
788          * anything.
789          */
790 }
791 
792 static int gem_rx(struct macb *bp, int budget)
793 {
794         unsigned int            len;
795         unsigned int            entry;
796         struct sk_buff          *skb;
797         struct macb_dma_desc    *desc;
798         int                     count = 0;
799 
800         while (count < budget) {
801                 u32 addr, ctrl;
802 
803                 entry = macb_rx_ring_wrap(bp->rx_tail);
804                 desc = &bp->rx_ring[entry];
805 
806                 /* Make hw descriptor updates visible to CPU */
807                 rmb();
808 
809                 addr = desc->addr;
810                 ctrl = desc->ctrl;
811 
812                 if (!(addr & MACB_BIT(RX_USED)))
813                         break;
814 
815                 bp->rx_tail++;
816                 count++;
817 
818                 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
819                         netdev_err(bp->dev,
 820                                    "descriptor does not point to a whole frame\n");
821                         bp->stats.rx_dropped++;
822                         break;
823                 }
824                 skb = bp->rx_skbuff[entry];
825                 if (unlikely(!skb)) {
826                         netdev_err(bp->dev,
827                                    "inconsistent Rx descriptor chain\n");
828                         bp->stats.rx_dropped++;
829                         break;
830                 }
 831                 /* now everything is ready for receiving the packet */
832                 bp->rx_skbuff[entry] = NULL;
833                 len = ctrl & bp->rx_frm_len_mask;
834 
835                 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
836 
837                 skb_put(skb, len);
838                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
839                 dma_unmap_single(&bp->pdev->dev, addr,
840                                  bp->rx_buffer_size, DMA_FROM_DEVICE);
841 
842                 skb->protocol = eth_type_trans(skb, bp->dev);
843                 skb_checksum_none_assert(skb);
844                 if (bp->dev->features & NETIF_F_RXCSUM &&
845                     !(bp->dev->flags & IFF_PROMISC) &&
846                     GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
847                         skb->ip_summed = CHECKSUM_UNNECESSARY;
848 
849                 bp->stats.rx_packets++;
850                 bp->stats.rx_bytes += skb->len;
851 
852 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
853                 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
854                             skb->len, skb->csum);
855                 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
856                                skb_mac_header(skb), 16, true);
857                 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
858                                skb->data, 32, true);
859 #endif
860 
861                 netif_receive_skb(skb);
862         }
863 
864         gem_rx_refill(bp);
865 
866         return count;
867 }
868 
869 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
870                          unsigned int last_frag)
871 {
872         unsigned int len;
873         unsigned int frag;
874         unsigned int offset;
875         struct sk_buff *skb;
876         struct macb_dma_desc *desc;
877 
878         desc = macb_rx_desc(bp, last_frag);
879         len = desc->ctrl & bp->rx_frm_len_mask;
880 
881         netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
882                     macb_rx_ring_wrap(first_frag),
883                     macb_rx_ring_wrap(last_frag), len);
884 
885         /* The ethernet header starts NET_IP_ALIGN bytes into the
886          * first buffer. Since the header is 14 bytes, this makes the
887          * payload word-aligned.
888          *
889          * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
890          * the two padding bytes into the skb so that we avoid hitting
891          * the slowpath in memcpy(), and pull them off afterwards.
892          */
893         skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
894         if (!skb) {
895                 bp->stats.rx_dropped++;
896                 for (frag = first_frag; ; frag++) {
897                         desc = macb_rx_desc(bp, frag);
898                         desc->addr &= ~MACB_BIT(RX_USED);
899                         if (frag == last_frag)
900                                 break;
901                 }
902 
903                 /* Make descriptor updates visible to hardware */
904                 wmb();
905 
906                 return 1;
907         }
908 
909         offset = 0;
910         len += NET_IP_ALIGN;
911         skb_checksum_none_assert(skb);
912         skb_put(skb, len);
913 
914         for (frag = first_frag; ; frag++) {
915                 unsigned int frag_len = bp->rx_buffer_size;
916 
917                 if (offset + frag_len > len) {
918                         if (unlikely(frag != last_frag)) {
919                                 dev_kfree_skb_any(skb);
920                                 return -1;
921                         }
922                         frag_len = len - offset;
923                 }
924                 skb_copy_to_linear_data_offset(skb, offset,
925                                                macb_rx_buffer(bp, frag),
926                                                frag_len);
927                 offset += bp->rx_buffer_size;
928                 desc = macb_rx_desc(bp, frag);
929                 desc->addr &= ~MACB_BIT(RX_USED);
930 
931                 if (frag == last_frag)
932                         break;
933         }
934 
935         /* Make descriptor updates visible to hardware */
936         wmb();
937 
938         __skb_pull(skb, NET_IP_ALIGN);
939         skb->protocol = eth_type_trans(skb, bp->dev);
940 
941         bp->stats.rx_packets++;
942         bp->stats.rx_bytes += skb->len;
943         netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
944                     skb->len, skb->csum);
945         netif_receive_skb(skb);
946 
947         return 0;
948 }
949 
950 static inline void macb_init_rx_ring(struct macb *bp)
951 {
952         dma_addr_t addr;
953         int i;
954 
955         addr = bp->rx_buffers_dma;
956         for (i = 0; i < RX_RING_SIZE; i++) {
957                 bp->rx_ring[i].addr = addr;
958                 bp->rx_ring[i].ctrl = 0;
959                 addr += bp->rx_buffer_size;
960         }
961         bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
962 }
963 
964 static int macb_rx(struct macb *bp, int budget)
965 {
966         bool reset_rx_queue = false;
967         int received = 0;
968         unsigned int tail;
969         int first_frag = -1;
970 
971         for (tail = bp->rx_tail; budget > 0; tail++) {
972                 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
973                 u32 addr, ctrl;
974 
975                 /* Make hw descriptor updates visible to CPU */
976                 rmb();
977 
978                 addr = desc->addr;
979                 ctrl = desc->ctrl;
980 
981                 if (!(addr & MACB_BIT(RX_USED)))
982                         break;
983 
984                 if (ctrl & MACB_BIT(RX_SOF)) {
985                         if (first_frag != -1)
986                                 discard_partial_frame(bp, first_frag, tail);
987                         first_frag = tail;
988                 }
989 
990                 if (ctrl & MACB_BIT(RX_EOF)) {
991                         int dropped;
992 
993                         if (unlikely(first_frag == -1)) {
994                                 reset_rx_queue = true;
995                                 continue;
996                         }
997 
998                         dropped = macb_rx_frame(bp, first_frag, tail);
999                         first_frag = -1;
1000                         if (unlikely(dropped < 0)) {
1001                                 reset_rx_queue = true;
1002                                 continue;
1003                         }
1004                         if (!dropped) {
1005                                 received++;
1006                                 budget--;
1007                         }
1008                 }
1009         }
1010 
1011         if (unlikely(reset_rx_queue)) {
1012                 unsigned long flags;
1013                 u32 ctrl;
1014 
1015                 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1016 
1017                 spin_lock_irqsave(&bp->lock, flags);
1018 
1019                 ctrl = macb_readl(bp, NCR);
1020                 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1021 
1022                 macb_init_rx_ring(bp);
1023                 macb_writel(bp, RBQP, bp->rx_ring_dma);
1024 
1025                 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1026 
1027                 spin_unlock_irqrestore(&bp->lock, flags);
1028                 return received;
1029         }
1030 
1031         if (first_frag != -1)
1032                 bp->rx_tail = first_frag;
1033         else
1034                 bp->rx_tail = tail;
1035 
1036         return received;
1037 }
1038 
1039 static int macb_poll(struct napi_struct *napi, int budget)
1040 {
1041         struct macb *bp = container_of(napi, struct macb, napi);
1042         int work_done;
1043         u32 status;
1044 
1045         status = macb_readl(bp, RSR);
1046         macb_writel(bp, RSR, status);
1047 
1048         work_done = 0;
1049 
1050         netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1051                     (unsigned long)status, budget);
1052 
1053         work_done = bp->macbgem_ops.mog_rx(bp, budget);
1054         if (work_done < budget) {
1055                 napi_complete(napi);
1056 
1057                 /* Packets received while interrupts were disabled */
1058                 status = macb_readl(bp, RSR);
1059                 if (status) {
1060                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1061                                 macb_writel(bp, ISR, MACB_BIT(RCOMP));
1062                         napi_reschedule(napi);
1063                 } else {
1064                         macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1065                 }
1066         }
1067 
1068         /* TODO: Handle errors */
1069 
1070         return work_done;
1071 }
1072 
1073 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1074 {
1075         struct macb_queue *queue = dev_id;
1076         struct macb *bp = queue->bp;
1077         struct net_device *dev = bp->dev;
1078         u32 status, ctrl;
1079 
1080         status = queue_readl(queue, ISR);
1081 
1082         if (unlikely(!status))
1083                 return IRQ_NONE;
1084 
1085         spin_lock(&bp->lock);
1086 
1087         while (status) {
1088                 /* close possible race with dev_close */
1089                 if (unlikely(!netif_running(dev))) {
1090                         queue_writel(queue, IDR, -1);
1091                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1092                                 queue_writel(queue, ISR, -1);
1093                         break;
1094                 }
1095 
1096                 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1097                             (unsigned int)(queue - bp->queues),
1098                             (unsigned long)status);
1099 
1100                 if (status & MACB_RX_INT_FLAGS) {
1101                         /* There's no point taking any more interrupts
1102                          * until we have processed the buffers. The
1103                          * scheduling call may fail if the poll routine
1104                          * is already scheduled, so disable interrupts
1105                          * now.
1106                          */
1107                         queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1108                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1109                                 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1110 
1111                         if (napi_schedule_prep(&bp->napi)) {
1112                                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1113                                 __napi_schedule(&bp->napi);
1114                         }
1115                 }
1116 
1117                 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1118                         queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1119                         schedule_work(&queue->tx_error_task);
1120 
1121                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1122                                 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1123 
1124                         break;
1125                 }
1126 
1127                 if (status & MACB_BIT(TCOMP))
1128                         macb_tx_interrupt(queue);
1129 
1130                 /* Link change detection isn't possible with RMII, so we'll
1131                  * add that if/when we get our hands on a full-blown MII PHY.
1132                  */
1133 
1134                 /* There is a hardware issue under heavy load where DMA can
1135                  * stop; this causes endless "used buffer descriptor read"
1136                  * interrupts, but it can be cleared by re-enabling RX. See
1137                  * the at91 manual, section 41.3.1 or the Zynq manual
1138                  * section 16.7.4 for details.
1139                  */
1140                 if (status & MACB_BIT(RXUBR)) {
1141                         ctrl = macb_readl(bp, NCR);
1142                         macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1143                         macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1144 
1145                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1146                                 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1147                 }
1148 
1149                 if (status & MACB_BIT(ISR_ROVR)) {
1150                         /* We missed at least one packet */
1151                         if (macb_is_gem(bp))
1152                                 bp->hw_stats.gem.rx_overruns++;
1153                         else
1154                                 bp->hw_stats.macb.rx_overruns++;
1155 
1156                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1157                                 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1158                 }
1159 
1160                 if (status & MACB_BIT(HRESP)) {
1161                         /* TODO: Reset the hardware, and maybe move the
1162                          * netdev_err to a lower-priority context as well
1163                          * (work queue?)
1164                          */
1165                         netdev_err(dev, "DMA bus error: HRESP not OK\n");
1166 
1167                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1168                                 queue_writel(queue, ISR, MACB_BIT(HRESP));
1169                 }
1170 
1171                 status = queue_readl(queue, ISR);
1172         }
1173 
1174         spin_unlock(&bp->lock);
1175 
1176         return IRQ_HANDLED;
1177 }
1178 
1179 #ifdef CONFIG_NET_POLL_CONTROLLER
1180 /* Polling receive - used by netconsole and other diagnostic tools
1181  * to allow network i/o with interrupts disabled.
1182  */
1183 static void macb_poll_controller(struct net_device *dev)
1184 {
1185         struct macb *bp = netdev_priv(dev);
1186         struct macb_queue *queue;
1187         unsigned long flags;
1188         unsigned int q;
1189 
1190         local_irq_save(flags);
1191         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1192                 macb_interrupt(dev->irq, queue);
1193         local_irq_restore(flags);
1194 }
1195 #endif
1196 
1197 static unsigned int macb_tx_map(struct macb *bp,
1198                                 struct macb_queue *queue,
1199                                 struct sk_buff *skb)
1200 {
1201         dma_addr_t mapping;
1202         unsigned int len, entry, i, tx_head = queue->tx_head;
1203         struct macb_tx_skb *tx_skb = NULL;
1204         struct macb_dma_desc *desc;
1205         unsigned int offset, size, count = 0;
1206         unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1207         unsigned int eof = 1;
1208         u32 ctrl;
1209 
1210         /* First, map non-paged data */
1211         len = skb_headlen(skb);
1212         offset = 0;
1213         while (len) {
1214                 size = min(len, bp->max_tx_length);
1215                 entry = macb_tx_ring_wrap(tx_head);
1216                 tx_skb = &queue->tx_skb[entry];
1217 
1218                 mapping = dma_map_single(&bp->pdev->dev,
1219                                          skb->data + offset,
1220                                          size, DMA_TO_DEVICE);
1221                 if (dma_mapping_error(&bp->pdev->dev, mapping))
1222                         goto dma_error;
1223 
1224                 /* Save info to properly release resources */
1225                 tx_skb->skb = NULL;
1226                 tx_skb->mapping = mapping;
1227                 tx_skb->size = size;
1228                 tx_skb->mapped_as_page = false;
1229 
1230                 len -= size;
1231                 offset += size;
1232                 count++;
1233                 tx_head++;
1234         }
1235 
1236         /* Then, map paged data from fragments */
1237         for (f = 0; f < nr_frags; f++) {
1238                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1239 
1240                 len = skb_frag_size(frag);
1241                 offset = 0;
1242                 while (len) {
1243                         size = min(len, bp->max_tx_length);
1244                         entry = macb_tx_ring_wrap(tx_head);
1245                         tx_skb = &queue->tx_skb[entry];
1246 
1247                         mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1248                                                    offset, size, DMA_TO_DEVICE);
1249                         if (dma_mapping_error(&bp->pdev->dev, mapping))
1250                                 goto dma_error;
1251 
1252                         /* Save info to properly release resources */
1253                         tx_skb->skb = NULL;
1254                         tx_skb->mapping = mapping;
1255                         tx_skb->size = size;
1256                         tx_skb->mapped_as_page = true;
1257 
1258                         len -= size;
1259                         offset += size;
1260                         count++;
1261                         tx_head++;
1262                 }
1263         }
1264 
1265         /* Should never happen */
1266         if (unlikely(!tx_skb)) {
1267                 netdev_err(bp->dev, "BUG! empty skb!\n");
1268                 return 0;
1269         }
1270 
1271         /* This is the last buffer of the frame: save socket buffer */
1272         tx_skb->skb = skb;
1273 
1274         /* Update TX ring: update buffer descriptors in reverse order
1275          * to avoid a race condition
1276          */
1277 
1278         /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1279          * to set the end of TX queue
1280          */
1281         i = tx_head;
1282         entry = macb_tx_ring_wrap(i);
1283         ctrl = MACB_BIT(TX_USED);
1284         desc = &queue->tx_ring[entry];
1285         desc->ctrl = ctrl;
1286 
1287         do {
1288                 i--;
1289                 entry = macb_tx_ring_wrap(i);
1290                 tx_skb = &queue->tx_skb[entry];
1291                 desc = &queue->tx_ring[entry];
1292 
1293                 ctrl = (u32)tx_skb->size;
1294                 if (eof) {
1295                         ctrl |= MACB_BIT(TX_LAST);
1296                         eof = 0;
1297                 }
1298                 if (unlikely(entry == (TX_RING_SIZE - 1)))
1299                         ctrl |= MACB_BIT(TX_WRAP);
1300 
1301                 /* Set TX buffer descriptor */
1302                 desc->addr = tx_skb->mapping;
1303                 /* desc->addr must be visible to hardware before clearing
1304                  * 'TX_USED' bit in desc->ctrl.
1305                  */
1306                 wmb();
1307                 desc->ctrl = ctrl;
1308         } while (i != queue->tx_head);
1309 
1310         queue->tx_head = tx_head;
1311 
1312         return count;
1313 
1314 dma_error:
1315         netdev_err(bp->dev, "TX DMA map failed\n");
1316 
1317         for (i = queue->tx_head; i != tx_head; i++) {
1318                 tx_skb = macb_tx_skb(queue, i);
1319 
1320                 macb_tx_unmap(bp, tx_skb);
1321         }
1322 
1323         return 0;
1324 }
1325 
1326 static inline int macb_clear_csum(struct sk_buff *skb)
1327 {
1328         /* no change for packets without checksum offloading */
1329         if (skb->ip_summed != CHECKSUM_PARTIAL)
1330                 return 0;
1331 
1332         /* make sure we can modify the header */
1333         if (unlikely(skb_cow_head(skb, 0)))
1334                 return -1;
1335 
1336         /* Initialize the checksum field.
1337          * This is required - at least for Zynq, which otherwise calculates
1338          * wrong UDP header checksums for UDP packets with a data length <= 2
1339          */
1340         *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1341         return 0;
1342 }
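
     /* For CHECKSUM_PARTIAL skbs, skb_checksum_start() points at the
      * transport header and skb->csum_offset locates the checksum field
      * within it (e.g. offsetof(struct udphdr, check) for UDP), so the
      * store above zeroes exactly the on-wire checksum field that the
      * hardware will later fill in.
      */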
1343 
1344 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1345 {
1346         u16 queue_index = skb_get_queue_mapping(skb);
1347         struct macb *bp = netdev_priv(dev);
1348         struct macb_queue *queue = &bp->queues[queue_index];
1349         unsigned long flags;
1350         unsigned int count, nr_frags, frag_size, f;
1351 
1352 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1353         netdev_vdbg(bp->dev,
1354                     "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1355                     queue_index, skb->len, skb->head, skb->data,
1356                     skb_tail_pointer(skb), skb_end_pointer(skb));
1357         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1358                        skb->data, 16, true);
1359 #endif
1360 
1361         /* Count how many TX buffer descriptors are needed to send this
1362          * socket buffer: skb fragments of jumbo frames may need to be
1363          * split into many buffer descriptors.
1364          */
1365         count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1366         nr_frags = skb_shinfo(skb)->nr_frags;
1367         for (f = 0; f < nr_frags; f++) {
1368                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1369                 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1370         }
1371 
1372         spin_lock_irqsave(&bp->lock, flags);
1373 
1374         /* This is a hard error, log it. */
1375         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1376                 netif_stop_subqueue(dev, queue_index);
1377                 spin_unlock_irqrestore(&bp->lock, flags);
1378                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1379                            queue->tx_head, queue->tx_tail);
1380                 return NETDEV_TX_BUSY;
1381         }
1382 
1383         if (macb_clear_csum(skb)) {
1384                 dev_kfree_skb_any(skb);
1385                 goto unlock;    /* must not return with bp->lock held */
1386         }
1387 
1388         /* Map socket buffer for DMA transfer */
1389         if (!macb_tx_map(bp, queue, skb)) {
1390                 dev_kfree_skb_any(skb);
1391                 goto unlock;
1392         }
1393 
1394         /* Make newly initialized descriptor visible to hardware */
1395         wmb();
1396 
1397         skb_tx_timestamp(skb);
1398 
1399         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1400 
1401         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1402                 netif_stop_subqueue(dev, queue_index);
1403 
1404 unlock:
1405         spin_unlock_irqrestore(&bp->lock, flags);
1406 
1407         return NETDEV_TX_OK;
1408 }
1409 
1410 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1411 {
1412         if (!macb_is_gem(bp)) {
1413                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1414         } else {
1415                 bp->rx_buffer_size = size;
1416 
1417                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1418                         netdev_dbg(bp->dev,
1419                                    "RX buffer must be multiple of %d bytes, expanding\n",
1420                                    RX_BUFFER_MULTIPLE);
1421                         bp->rx_buffer_size =
1422                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1423                 }
1424         }
1425 
1426         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1427                    bp->dev->mtu, bp->rx_buffer_size);
1428 }
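
     /* Example (a rough sketch; the exact size is computed by the caller):
      * a standard 1500-byte MTU frame of 1518 bytes, padded for
      * NET_IP_ALIGN, needs roundup(1520, 64) == 1536 bytes per GEM RX
      * buffer.
      */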
1429 
1430 static void gem_free_rx_buffers(struct macb *bp)
1431 {
1432         struct sk_buff          *skb;
1433         struct macb_dma_desc    *desc;
1434         dma_addr_t              addr;
1435         int i;
1436 
1437         if (!bp->rx_skbuff)
1438                 return;
1439 
1440         for (i = 0; i < RX_RING_SIZE; i++) {
1441                 skb = bp->rx_skbuff[i];
1442 
1443                 if (!skb)
1444                         continue;
1445 
1446                 desc = &bp->rx_ring[i];
1447                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1448                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1449                                  DMA_FROM_DEVICE);
1450                 dev_kfree_skb_any(skb);
1451                 skb = NULL;
1452         }
1453 
1454         kfree(bp->rx_skbuff);
1455         bp->rx_skbuff = NULL;
1456 }
1457 
1458 static void macb_free_rx_buffers(struct macb *bp)
1459 {
1460         if (bp->rx_buffers) {
1461                 dma_free_coherent(&bp->pdev->dev,
1462                                   RX_RING_SIZE * bp->rx_buffer_size,
1463                                   bp->rx_buffers, bp->rx_buffers_dma);
1464                 bp->rx_buffers = NULL;
1465         }
1466 }
1467 
1468 static void macb_free_consistent(struct macb *bp)
1469 {
1470         struct macb_queue *queue;
1471         unsigned int q;
1472 
1473         bp->macbgem_ops.mog_free_rx_buffers(bp);
1474         if (bp->rx_ring) {
1475                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1476                                   bp->rx_ring, bp->rx_ring_dma);
1477                 bp->rx_ring = NULL;
1478         }
1479 
1480         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1481                 kfree(queue->tx_skb);
1482                 queue->tx_skb = NULL;
1483                 if (queue->tx_ring) {
1484                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1485                                           queue->tx_ring, queue->tx_ring_dma);
1486                         queue->tx_ring = NULL;
1487                 }
1488         }
1489 }
1490 
1491 static int gem_alloc_rx_buffers(struct macb *bp)
1492 {
1493         int size;
1494 
1495         size = RX_RING_SIZE * sizeof(struct sk_buff *);
1496         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1497         if (!bp->rx_skbuff)
1498                 return -ENOMEM;
1499 
1500         netdev_dbg(bp->dev,
1501                    "Allocated %d RX struct sk_buff entries at %p\n",
1502                    RX_RING_SIZE, bp->rx_skbuff);
1503         return 0;
1504 }
1505 
1506 static int macb_alloc_rx_buffers(struct macb *bp)
1507 {
1508         int size;
1509 
1510         size = RX_RING_SIZE * bp->rx_buffer_size;
1511         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1512                                             &bp->rx_buffers_dma, GFP_KERNEL);
1513         if (!bp->rx_buffers)
1514                 return -ENOMEM;
1515 
1516         netdev_dbg(bp->dev,
1517                    "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1518                    size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1519         return 0;
1520 }
1521 
1522 static int macb_alloc_consistent(struct macb *bp)
1523 {
1524         struct macb_queue *queue;
1525         unsigned int q;
1526         int size;
1527 
1528         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1529                 size = TX_RING_BYTES;
1530                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1531                                                     &queue->tx_ring_dma,
1532                                                     GFP_KERNEL);
1533                 if (!queue->tx_ring)
1534                         goto out_err;
1535                 netdev_dbg(bp->dev,
1536                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1537                            q, size, (unsigned long)queue->tx_ring_dma,
1538                            queue->tx_ring);
1539 
1540                 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1541                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1542                 if (!queue->tx_skb)
1543                         goto out_err;
1544         }
1545 
1546         size = RX_RING_BYTES;
1547         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1548                                          &bp->rx_ring_dma, GFP_KERNEL);
1549         if (!bp->rx_ring)
1550                 goto out_err;
1551         netdev_dbg(bp->dev,
1552                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1553                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1554 
1555         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1556                 goto out_err;
1557 
1558         return 0;
1559 
1560 out_err:
1561         macb_free_consistent(bp);
1562         return -ENOMEM;
1563 }
1564 
1565 static void gem_init_rings(struct macb *bp)
1566 {
1567         struct macb_queue *queue;
1568         unsigned int q;
1569         int i;
1570 
1571         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1572                 for (i = 0; i < TX_RING_SIZE; i++) {
1573                         queue->tx_ring[i].addr = 0;
1574                         queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1575                 }
1576                 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1577                 queue->tx_head = 0;
1578                 queue->tx_tail = 0;
1579         }
1580 
1581         bp->rx_tail = 0;
1582         bp->rx_prepared_head = 0;
1583 
1584         gem_rx_refill(bp);
1585 }
1586 
1587 static void macb_init_rings(struct macb *bp)
1588 {
1589         int i;
1590 
1591         macb_init_rx_ring(bp);
1592 
1593         for (i = 0; i < TX_RING_SIZE; i++) {
1594                 bp->queues[0].tx_ring[i].addr = 0;
1595                 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1596         }
1597         bp->queues[0].tx_head = 0;
1598         bp->queues[0].tx_tail = 0;
1599         bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1600 
1601         bp->rx_tail = 0;
1602 }
1603 
1604 static void macb_reset_hw(struct macb *bp)
1605 {
1606         struct macb_queue *queue;
1607         unsigned int q;
1608 
1609         /* Disable RX and TX (XXX: Should we halt the transmission
1610          * more gracefully?)
1611          */
1612         macb_writel(bp, NCR, 0);
1613 
1614         /* Clear the stats registers (XXX: Update stats first?) */
1615         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1616 
1617         /* Clear all status flags */
1618         macb_writel(bp, TSR, -1);
1619         macb_writel(bp, RSR, -1);
1620 
1621         /* Disable all interrupts */
1622         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1623                 queue_writel(queue, IDR, -1);
1624                 queue_readl(queue, ISR);
1625                 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1626                         queue_writel(queue, ISR, -1);
1627         }
1628 }
1629 
1630 static u32 gem_mdc_clk_div(struct macb *bp)
1631 {
1632         u32 config;
1633         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1634 
1635         if (pclk_hz <= 20000000)
1636                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1637         else if (pclk_hz <= 40000000)
1638                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1639         else if (pclk_hz <= 80000000)
1640                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1641         else if (pclk_hz <= 120000000)
1642                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1643         else if (pclk_hz <= 160000000)
1644                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1645         else
1646                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1647 
1648         return config;
1649 }
1650 
1651 static u32 macb_mdc_clk_div(struct macb *bp)
1652 {
1653         u32 config;
1654         unsigned long pclk_hz;
1655 
1656         if (macb_is_gem(bp))
1657                 return gem_mdc_clk_div(bp);
1658 
1659         pclk_hz = clk_get_rate(bp->pclk);
1660         if (pclk_hz <= 20000000)
1661                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1662         else if (pclk_hz <= 40000000)
1663                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1664         else if (pclk_hz <= 80000000)
1665                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1666         else
1667                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1668 
1669         return config;
1670 }
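
/* Example: the dividers keep MDC at or below the 2.5 MHz limit of IEEE
 * 802.3 clause 22.  With pclk at 133 MHz on a GEM, the first matching
 * range is pclk_hz <= 160000000, so GEM_CLK_DIV64 is selected and MDC
 * runs at roughly 133 MHz / 64 ~= 2.08 MHz.
 */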
1671 
1672 /* Get the DMA bus width field of the network configuration register that we
1673  * should program.  We find the width from decoding the design configuration
1674  * register to find the maximum supported data bus width.
1675  */
1676 static u32 macb_dbw(struct macb *bp)
1677 {
1678         if (!macb_is_gem(bp))
1679                 return 0;
1680 
1681         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1682         case 4:
1683                 return GEM_BF(DBW, GEM_DBW128);
1684         case 2:
1685                 return GEM_BF(DBW, GEM_DBW64);
1686         case 1:
1687         default:
1688                 return GEM_BF(DBW, GEM_DBW32);
1689         }
1690 }
1691 
1692 /* Configure the receive DMA engine
1693  * - use the correct receive buffer size
1694  * - set best burst length for DMA operations
1695  *   (if not supported by FIFO, it falls back to the default)
1696  * - set both rx/tx packet buffers to full memory size
1697  * These are configurable parameters for GEM.
1698  */
1699 static void macb_configure_dma(struct macb *bp)
1700 {
1701         u32 dmacfg;
1702 
1703         if (macb_is_gem(bp)) {
1704                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1705                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1706                 if (bp->dma_burst_length)
1707                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1708                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1709                 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1710 
1711                 if (bp->native_io)
1712                         dmacfg &= ~GEM_BIT(ENDIA_DESC);
1713                 else
1714                         dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1715 
1716                 if (bp->dev->features & NETIF_F_HW_CSUM)
1717                         dmacfg |= GEM_BIT(TXCOEN);
1718                 else
1719                         dmacfg &= ~GEM_BIT(TXCOEN);
1720                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1721                            dmacfg);
1722                 gem_writel(bp, DMACFG, dmacfg);
1723         }
1724 }
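
/* Worked example (GEM, values assumed): with rx_buffer_size = 1536 the
 * RXBS field is written with 1536 / RX_BUFFER_MULTIPLE = 24, i.e. the
 * DMA engine counts receive buffers in 64-byte units, and a device-tree
 * dma_burst_length of 16 lands in FBLDO to request 16-beat bursts where
 * the bus fabric supports them.
 */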
1725 
1726 static void macb_init_hw(struct macb *bp)
1727 {
1728         struct macb_queue *queue;
1729         unsigned int q;
1730 
1731         u32 config;
1732 
1733         macb_reset_hw(bp);
1734         macb_set_hwaddr(bp);
1735 
1736         config = macb_mdc_clk_div(bp);
1737         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1738                 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1739         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1740         config |= MACB_BIT(PAE);                /* PAuse Enable */
1741         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1742         if (bp->caps & MACB_CAPS_JUMBO)
1743                 config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
1744         else
1745                 config |= MACB_BIT(BIG);        /* Receive oversized frames */
1746         if (bp->dev->flags & IFF_PROMISC)
1747                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1748         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1749                 config |= GEM_BIT(RXCOEN);
1750         if (!(bp->dev->flags & IFF_BROADCAST))
1751                 config |= MACB_BIT(NBC);        /* No BroadCast */
1752         config |= macb_dbw(bp);
1753         macb_writel(bp, NCFGR, config);
1754         if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1755                 gem_writel(bp, JML, bp->jumbo_max_len);
1756         bp->speed = SPEED_10;
1757         bp->duplex = DUPLEX_HALF;
1758         bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1759         if (bp->caps & MACB_CAPS_JUMBO)
1760                 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1761 
1762         macb_configure_dma(bp);
1763 
1764         /* Initialize TX and RX buffers */
1765         macb_writel(bp, RBQP, bp->rx_ring_dma);
1766         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1767                 queue_writel(queue, TBQP, queue->tx_ring_dma);
1768 
1769                 /* Enable interrupts */
1770                 queue_writel(queue, IER,
1771                              MACB_RX_INT_FLAGS |
1772                              MACB_TX_INT_FLAGS |
1773                              MACB_BIT(HRESP));
1774         }
1775 
1776         /* Enable TX and RX */
1777         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1778 }
1779 
1780 /* The hash address register is 64 bits long and takes up two
1781  * locations in the memory map.  The least significant bits are stored
1782  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1783  *
1784  * The unicast hash enable and the multicast hash enable bits in the
1785  * network configuration register enable the reception of hash matched
1786  * frames. The destination address is reduced to a 6 bit index into
1787  * the 64 bit hash register using the following hash function.  The
1788  * hash function is an exclusive or of every sixth bit of the
1789  * destination address.
1790  *
1791  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1792  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1793  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1794  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1795  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1796  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1797  *
1798  * da[0] represents the least significant bit of the first byte
1799  * received, that is, the multicast/unicast indicator, and da[47]
1800  * represents the most significant bit of the last byte received.  If
1801  * the hash index, hi[n], points to a bit that is set in the hash
1802  * register then the frame will be matched according to whether the
1803  * frame is multicast or unicast.  A multicast match will be signalled
1804  * if the multicast hash enable bit is set, da[0] is 1 and the hash
1805  * index points to a bit set in the hash register.  A unicast match
1806  * will be signalled if the unicast hash enable bit is set, da[0] is 0
1807  * and the hash index points to a bit set in the hash register.  To
1808  * receive all multicast frames, the hash register should be set with
1809  * all ones and the multicast hash enable bit should be set in the
1810  * network configuration register.
1811  */
1812 
1813 static inline int hash_bit_value(int bitnr, __u8 *addr)
1814 {
1815         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1816                 return 1;
1817         return 0;
1818 }
1819 
1820 /* Return the hash index value for the specified address. */
1821 static int hash_get_index(__u8 *addr)
1822 {
1823         int i, j, bitval;
1824         int hash_index = 0;
1825 
1826         for (j = 0; j < 6; j++) {
1827                 for (i = 0, bitval = 0; i < 8; i++)
1828                         bitval ^= hash_bit_value(i * 6 + j, addr);
1829 
1830                 hash_index |= (bitval << j);
1831         }
1832 
1833         return hash_index;
1834 }
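
/* Worked example (illustrative): for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[] bit is 1, so each of the six index bits
 * is the XOR of eight ones and is therefore 0:
 *
 *	__u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *
 *	hash_get_index(bcast) returns 0, selecting bit 0 of mc_filter[0]
 *	(register HRB) in macb_sethashtable() below.
 */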
1835 
1836 /* Add multicast addresses to the internal multicast-hash table. */
1837 static void macb_sethashtable(struct net_device *dev)
1838 {
1839         struct netdev_hw_addr *ha;
1840         unsigned long mc_filter[2];
1841         unsigned int bitnr;
1842         struct macb *bp = netdev_priv(dev);
1843 
1844         mc_filter[0] = 0;
1845         mc_filter[1] = 0;
1846 
1847         netdev_for_each_mc_addr(ha, dev) {
1848                 bitnr = hash_get_index(ha->addr);
1849                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1850         }
1851 
1852         macb_or_gem_writel(bp, HRB, mc_filter[0]);
1853         macb_or_gem_writel(bp, HRT, mc_filter[1]);
1854 }
1855 
1856 /* Enable/Disable promiscuous and multicast modes. */
1857 static void macb_set_rx_mode(struct net_device *dev)
1858 {
1859         unsigned long cfg;
1860         struct macb *bp = netdev_priv(dev);
1861 
1862         cfg = macb_readl(bp, NCFGR);
1863 
1864         if (dev->flags & IFF_PROMISC) {
1865                 /* Enable promiscuous mode */
1866                 cfg |= MACB_BIT(CAF);
1867 
1868                 /* Disable RX checksum offload */
1869                 if (macb_is_gem(bp))
1870                         cfg &= ~GEM_BIT(RXCOEN);
1871         } else {
1872                 /* Disable promiscuous mode */
1873                 cfg &= ~MACB_BIT(CAF);
1874 
1875                 /* Enable RX checksum offload only if requested */
1876                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1877                         cfg |= GEM_BIT(RXCOEN);
1878         }
1879 
1880         if (dev->flags & IFF_ALLMULTI) {
1881                 /* Enable all multicast mode */
1882                 macb_or_gem_writel(bp, HRB, -1);
1883                 macb_or_gem_writel(bp, HRT, -1);
1884                 cfg |= MACB_BIT(NCFGR_MTI);
1885         } else if (!netdev_mc_empty(dev)) {
1886                 /* Enable specific multicasts */
1887                 macb_sethashtable(dev);
1888                 cfg |= MACB_BIT(NCFGR_MTI);
1889         } else if (dev->flags & (~IFF_ALLMULTI)) {
1890                 /* Disable all multicast mode */
1891                 macb_or_gem_writel(bp, HRB, 0);
1892                 macb_or_gem_writel(bp, HRT, 0);
1893                 cfg &= ~MACB_BIT(NCFGR_MTI);
1894         }
1895 
1896         macb_writel(bp, NCFGR, cfg);
1897 }
1898 
1899 static int macb_open(struct net_device *dev)
1900 {
1901         struct macb *bp = netdev_priv(dev);
1902         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1903         int err;
1904 
1905         netdev_dbg(bp->dev, "open\n");
1906 
1907         /* carrier starts down */
1908         netif_carrier_off(dev);
1909 
1910         /* if the phy is not yet registered, retry later */
1911         if (!dev->phydev)
1912                 return -EAGAIN;
1913 
1914         /* RX buffers initialization */
1915         macb_init_rx_buffer_size(bp, bufsz);
1916 
1917         err = macb_alloc_consistent(bp);
1918         if (err) {
1919                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1920                            err);
1921                 return err;
1922         }
1923 
1924         napi_enable(&bp->napi);
1925 
1926         bp->macbgem_ops.mog_init_rings(bp);
1927         macb_init_hw(bp);
1928 
1929         /* schedule a link state check */
1930         phy_start(dev->phydev);
1931 
1932         netif_tx_start_all_queues(dev);
1933 
1934         return 0;
1935 }
1936 
1937 static int macb_close(struct net_device *dev)
1938 {
1939         struct macb *bp = netdev_priv(dev);
1940         unsigned long flags;
1941 
1942         netif_tx_stop_all_queues(dev);
1943         napi_disable(&bp->napi);
1944 
1945         if (dev->phydev)
1946                 phy_stop(dev->phydev);
1947 
1948         spin_lock_irqsave(&bp->lock, flags);
1949         macb_reset_hw(bp);
1950         netif_carrier_off(dev);
1951         spin_unlock_irqrestore(&bp->lock, flags);
1952 
1953         macb_free_consistent(bp);
1954 
1955         return 0;
1956 }
1957 
1958 static int macb_change_mtu(struct net_device *dev, int new_mtu)
1959 {
1960         struct macb *bp = netdev_priv(dev);
1961         u32 max_mtu;
1962 
1963         if (netif_running(dev))
1964                 return -EBUSY;
1965 
1966         max_mtu = ETH_DATA_LEN;
1967         if (bp->caps & MACB_CAPS_JUMBO)
1968                 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
1969 
1970         if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
1971                 return -EINVAL;
1972 
1973         dev->mtu = new_mtu;
1974 
1975         return 0;
1976 }
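
/* Worked example (assuming a controller using zynqmp_config below, where
 * JML is programmed with jumbo_max_len = 10240): max_mtu = 10240 -
 * ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222, so requesting an MTU of 9000
 * succeeds while 10223 fails with -EINVAL.  Without MACB_CAPS_JUMBO the
 * ceiling is the standard ETH_DATA_LEN of 1500.
 */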
1977 
1978 static void gem_update_stats(struct macb *bp)
1979 {
1980         unsigned int i;
1981         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1982 
1983         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1984                 u32 offset = gem_statistics[i].offset;
1985                 u64 val = bp->macb_reg_readl(bp, offset);
1986 
1987                 bp->ethtool_stats[i] += val;
1988                 *p += val;
1989 
1990                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1991                         /* Add GEM_OCTTXH, GEM_OCTRXH */
1992                         val = bp->macb_reg_readl(bp, offset + 4);
1993                         bp->ethtool_stats[i] += ((u64)val) << 32;
1994                         *(++p) += val;
1995                 }
1996         }
1997 }
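
/* Sketch (illustrative) of the 64-bit counter handling above: GEM keeps
 * the TX/RX octet counts as a 32-bit low word (GEM_OCTTXL / GEM_OCTRXL)
 * immediately followed by the high word at offset + 4, so the loop
 * effectively accumulates
 *
 *	total += ((u64)reg[offset + 4] << 32) | reg[offset];
 *
 * into ethtool_stats[i], while the two halves also land in adjacent
 * gem_stats fields for gem_get_stats().
 */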
1998 
1999 static struct net_device_stats *gem_get_stats(struct macb *bp)
2000 {
2001         struct gem_stats *hwstat = &bp->hw_stats.gem;
2002         struct net_device_stats *nstat = &bp->stats;
2003 
2004         gem_update_stats(bp);
2005 
2006         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2007                             hwstat->rx_alignment_errors +
2008                             hwstat->rx_resource_errors +
2009                             hwstat->rx_overruns +
2010                             hwstat->rx_oversize_frames +
2011                             hwstat->rx_jabbers +
2012                             hwstat->rx_undersized_frames +
2013                             hwstat->rx_length_field_frame_errors);
2014         nstat->tx_errors = (hwstat->tx_late_collisions +
2015                             hwstat->tx_excessive_collisions +
2016                             hwstat->tx_underrun +
2017                             hwstat->tx_carrier_sense_errors);
2018         nstat->multicast = hwstat->rx_multicast_frames;
2019         nstat->collisions = (hwstat->tx_single_collision_frames +
2020                              hwstat->tx_multiple_collision_frames +
2021                              hwstat->tx_excessive_collisions);
2022         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2023                                    hwstat->rx_jabbers +
2024                                    hwstat->rx_undersized_frames +
2025                                    hwstat->rx_length_field_frame_errors);
2026         nstat->rx_over_errors = hwstat->rx_resource_errors;
2027         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2028         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2029         nstat->rx_fifo_errors = hwstat->rx_overruns;
2030         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2031         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2032         nstat->tx_fifo_errors = hwstat->tx_underrun;
2033 
2034         return nstat;
2035 }
2036 
2037 static void gem_get_ethtool_stats(struct net_device *dev,
2038                                   struct ethtool_stats *stats, u64 *data)
2039 {
2040         struct macb *bp;
2041 
2042         bp = netdev_priv(dev);
2043         gem_update_stats(bp);
2044         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2045 }
2046 
2047 static int gem_get_sset_count(struct net_device *dev, int sset)
2048 {
2049         switch (sset) {
2050         case ETH_SS_STATS:
2051                 return GEM_STATS_LEN;
2052         default:
2053                 return -EOPNOTSUPP;
2054         }
2055 }
2056 
2057 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2058 {
2059         unsigned int i;
2060 
2061         switch (sset) {
2062         case ETH_SS_STATS:
2063                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2064                         memcpy(p, gem_statistics[i].stat_string,
2065                                ETH_GSTRING_LEN);
2066                 break;
2067         }
2068 }
2069 
2070 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2071 {
2072         struct macb *bp = netdev_priv(dev);
2073         struct net_device_stats *nstat = &bp->stats;
2074         struct macb_stats *hwstat = &bp->hw_stats.macb;
2075 
2076         if (macb_is_gem(bp))
2077                 return gem_get_stats(bp);
2078 
2079         /* read stats from hardware */
2080         macb_update_stats(bp);
2081 
2082         /* Convert HW stats into netdevice stats */
2083         nstat->rx_errors = (hwstat->rx_fcs_errors +
2084                             hwstat->rx_align_errors +
2085                             hwstat->rx_resource_errors +
2086                             hwstat->rx_overruns +
2087                             hwstat->rx_oversize_pkts +
2088                             hwstat->rx_jabbers +
2089                             hwstat->rx_undersize_pkts +
2090                             hwstat->rx_length_mismatch);
2091         nstat->tx_errors = (hwstat->tx_late_cols +
2092                             hwstat->tx_excessive_cols +
2093                             hwstat->tx_underruns +
2094                             hwstat->tx_carrier_errors +
2095                             hwstat->sqe_test_errors);
2096         nstat->collisions = (hwstat->tx_single_cols +
2097                              hwstat->tx_multiple_cols +
2098                              hwstat->tx_excessive_cols);
2099         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2100                                    hwstat->rx_jabbers +
2101                                    hwstat->rx_undersize_pkts +
2102                                    hwstat->rx_length_mismatch);
2103         nstat->rx_over_errors = hwstat->rx_resource_errors +
2104                                 hwstat->rx_overruns;
2105         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2106         nstat->rx_frame_errors = hwstat->rx_align_errors;
2107         nstat->rx_fifo_errors = hwstat->rx_overruns;
2108         /* XXX: What does "missed" mean? */
2109         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2110         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2111         nstat->tx_fifo_errors = hwstat->tx_underruns;
2112         /* Don't know about heartbeat or window errors... */
2113 
2114         return nstat;
2115 }
2116 
2117 static int macb_get_regs_len(struct net_device *netdev)
2118 {
2119         return MACB_GREGS_NBR * sizeof(u32);
2120 }
2121 
2122 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2123                           void *p)
2124 {
2125         struct macb *bp = netdev_priv(dev);
2126         unsigned int tail, head;
2127         u32 *regs_buff = p;
2128 
2129         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2130                         | MACB_GREGS_VERSION;
2131 
2132         tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2133         head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2134 
2135         regs_buff[0]  = macb_readl(bp, NCR);
2136         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2137         regs_buff[2]  = macb_readl(bp, NSR);
2138         regs_buff[3]  = macb_readl(bp, TSR);
2139         regs_buff[4]  = macb_readl(bp, RBQP);
2140         regs_buff[5]  = macb_readl(bp, TBQP);
2141         regs_buff[6]  = macb_readl(bp, RSR);
2142         regs_buff[7]  = macb_readl(bp, IMR);
2143 
2144         regs_buff[8]  = tail;
2145         regs_buff[9]  = head;
2146         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2147         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2148 
2149         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2150                 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2151         if (macb_is_gem(bp))
2152                 regs_buff[13] = gem_readl(bp, DMACFG);
2153 }
2154 
2155 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2156 {
2157         struct macb *bp = netdev_priv(netdev);
2158 
2159         wol->supported = 0;
2160         wol->wolopts = 0;
2161 
2162         if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2163                 wol->supported = WAKE_MAGIC;
2164 
2165                 if (bp->wol & MACB_WOL_ENABLED)
2166                         wol->wolopts |= WAKE_MAGIC;
2167         }
2168 }
2169 
2170 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2171 {
2172         struct macb *bp = netdev_priv(netdev);
2173 
2174         if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2175             (wol->wolopts & ~WAKE_MAGIC))
2176                 return -EOPNOTSUPP;
2177 
2178         if (wol->wolopts & WAKE_MAGIC)
2179                 bp->wol |= MACB_WOL_ENABLED;
2180         else
2181                 bp->wol &= ~MACB_WOL_ENABLED;
2182 
2183         device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2184 
2185         return 0;
2186 }
2187 
2188 static const struct ethtool_ops macb_ethtool_ops = {
2189         .get_regs_len           = macb_get_regs_len,
2190         .get_regs               = macb_get_regs,
2191         .get_link               = ethtool_op_get_link,
2192         .get_ts_info            = ethtool_op_get_ts_info,
2193         .get_wol                = macb_get_wol,
2194         .set_wol                = macb_set_wol,
2195         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2196         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2197 };
2198 
2199 static const struct ethtool_ops gem_ethtool_ops = {
2200         .get_regs_len           = macb_get_regs_len,
2201         .get_regs               = macb_get_regs,
2202         .get_link               = ethtool_op_get_link,
2203         .get_ts_info            = ethtool_op_get_ts_info,
2204         .get_ethtool_stats      = gem_get_ethtool_stats,
2205         .get_strings            = gem_get_ethtool_strings,
2206         .get_sset_count         = gem_get_sset_count,
2207         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2208         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2209 };
2210 
2211 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2212 {
2213         struct phy_device *phydev = dev->phydev;
2214 
2215         if (!netif_running(dev))
2216                 return -EINVAL;
2217 
2218         if (!phydev)
2219                 return -ENODEV;
2220 
2221         return phy_mii_ioctl(phydev, rq, cmd);
2222 }
2223 
2224 static int macb_set_features(struct net_device *netdev,
2225                              netdev_features_t features)
2226 {
2227         struct macb *bp = netdev_priv(netdev);
2228         netdev_features_t changed = features ^ netdev->features;
2229 
2230         /* TX checksum offload */
2231         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2232                 u32 dmacfg;
2233 
2234                 dmacfg = gem_readl(bp, DMACFG);
2235                 if (features & NETIF_F_HW_CSUM)
2236                         dmacfg |= GEM_BIT(TXCOEN);
2237                 else
2238                         dmacfg &= ~GEM_BIT(TXCOEN);
2239                 gem_writel(bp, DMACFG, dmacfg);
2240         }
2241 
2242         /* RX checksum offload */
2243         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2244                 u32 netcfg;
2245 
2246                 netcfg = gem_readl(bp, NCFGR);
2247                 if (features & NETIF_F_RXCSUM &&
2248                     !(netdev->flags & IFF_PROMISC))
2249                         netcfg |= GEM_BIT(RXCOEN);
2250                 else
2251                         netcfg &= ~GEM_BIT(RXCOEN);
2252                 gem_writel(bp, NCFGR, netcfg);
2253         }
2254 
2255         return 0;
2256 }
2257 
2258 static const struct net_device_ops macb_netdev_ops = {
2259         .ndo_open               = macb_open,
2260         .ndo_stop               = macb_close,
2261         .ndo_start_xmit         = macb_start_xmit,
2262         .ndo_set_rx_mode        = macb_set_rx_mode,
2263         .ndo_get_stats          = macb_get_stats,
2264         .ndo_do_ioctl           = macb_ioctl,
2265         .ndo_validate_addr      = eth_validate_addr,
2266         .ndo_change_mtu         = macb_change_mtu,
2267         .ndo_set_mac_address    = eth_mac_addr,
2268 #ifdef CONFIG_NET_POLL_CONTROLLER
2269         .ndo_poll_controller    = macb_poll_controller,
2270 #endif
2271         .ndo_set_features       = macb_set_features,
2272 };
2273 
2274 /* Configure peripheral capabilities according to device tree
2275  * and integration options used
2276  */
2277 static void macb_configure_caps(struct macb *bp,
2278                                 const struct macb_config *dt_conf)
2279 {
2280         u32 dcfg;
2281 
2282         if (dt_conf)
2283                 bp->caps = dt_conf->caps;
2284 
2285         if (hw_is_gem(bp->regs, bp->native_io)) {
2286                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2287 
2288                 dcfg = gem_readl(bp, DCFG1);
2289                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2290                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2291                 dcfg = gem_readl(bp, DCFG2);
2292                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2293                         bp->caps |= MACB_CAPS_FIFO_MODE;
2294         }
2295 
2296         dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2297 }
2298 
2299 static void macb_probe_queues(void __iomem *mem,
2300                               bool native_io,
2301                               unsigned int *queue_mask,
2302                               unsigned int *num_queues)
2303 {
2304         unsigned int hw_q;
2305 
2306         *queue_mask = 0x1;
2307         *num_queues = 1;
2308 
2309         /* Is it macb or gem?
2310          *
2311          * We need to read directly from the hardware here because
2312          * we are early in the probe process and the
2313          * MACB_CAPS_MACB_IS_GEM flag is not yet set.
2314          */
2315         if (!hw_is_gem(mem, native_io))
2316                 return;
2317 
2318         /* bit 0 is never set but queue 0 always exists */
2319         *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2320 
2321         *queue_mask |= 0x1;
2322 
2323         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2324                 if (*queue_mask & (1 << hw_q))
2325                         (*num_queues)++;
2326 }
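
/* Worked example (register value assumed): if the low byte of GEM_DCFG6
 * reads 0x06 (hardware queues 1 and 2 present), the code above yields
 * queue_mask = 0x07 once bit 0 is forced, and num_queues = 3 (queue 0
 * plus one per extra mask bit).  A plain MACB always reports one queue.
 */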
2327 
2328 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2329                          struct clk **hclk, struct clk **tx_clk)
2330 {
2331         int err;
2332 
2333         *pclk = devm_clk_get(&pdev->dev, "pclk");
2334         if (IS_ERR(*pclk)) {
2335                 err = PTR_ERR(*pclk);
2336                 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
2337                 return err;
2338         }
2339 
2340         *hclk = devm_clk_get(&pdev->dev, "hclk");
2341         if (IS_ERR(*hclk)) {
2342                 err = PTR_ERR(*hclk);
2343                 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2344                 return err;
2345         }
2346 
2347         *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2348         if (IS_ERR(*tx_clk))
2349                 *tx_clk = NULL;
2350 
2351         err = clk_prepare_enable(*pclk);
2352         if (err) {
2353                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2354                 return err;
2355         }
2356 
2357         err = clk_prepare_enable(*hclk);
2358         if (err) {
2359                 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2360                 goto err_disable_pclk;
2361         }
2362 
2363         err = clk_prepare_enable(*tx_clk);
2364         if (err) {
2365                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2366                 goto err_disable_hclk;
2367         }
2368 
2369         return 0;
2370 
2371 err_disable_hclk:
2372         clk_disable_unprepare(*hclk);
2373 
2374 err_disable_pclk:
2375         clk_disable_unprepare(*pclk);
2376 
2377         return err;
2378 }
2379 
2380 static int macb_init(struct platform_device *pdev)
2381 {
2382         struct net_device *dev = platform_get_drvdata(pdev);
2383         unsigned int hw_q, q;
2384         struct macb *bp = netdev_priv(dev);
2385         struct macb_queue *queue;
2386         int err;
2387         u32 val;
2388 
2389         /* Set the queue register mapping once and for all: queue0 has a
2390          * special register mapping, but we don't want to test the queue
2391          * index and compute the corresponding register offset at run time.
2392          */
2393         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2394                 if (!(bp->queue_mask & (1 << hw_q)))
2395                         continue;
2396 
2397                 queue = &bp->queues[q];
2398                 queue->bp = bp;
2399                 if (hw_q) {
2400                         queue->ISR  = GEM_ISR(hw_q - 1);
2401                         queue->IER  = GEM_IER(hw_q - 1);
2402                         queue->IDR  = GEM_IDR(hw_q - 1);
2403                         queue->IMR  = GEM_IMR(hw_q - 1);
2404                         queue->TBQP = GEM_TBQP(hw_q - 1);
2405                 } else {
2406                         /* queue0 uses legacy registers */
2407                         queue->ISR  = MACB_ISR;
2408                         queue->IER  = MACB_IER;
2409                         queue->IDR  = MACB_IDR;
2410                         queue->IMR  = MACB_IMR;
2411                         queue->TBQP = MACB_TBQP;
2412                 }
2413 
2414                 /* Get the IRQ: here we use the Linux queue index, not the
2415                  * hardware queue index. The queue IRQ definitions in the
2416                  * device tree must not include the optional gaps that could
2417                  * exist in the hardware queue mask.
2418                  */
2419                 queue->irq = platform_get_irq(pdev, q);
2420                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2421                                        IRQF_SHARED, dev->name, queue);
2422                 if (err) {
2423                         dev_err(&pdev->dev,
2424                                 "Unable to request IRQ %d (error %d)\n",
2425                                 queue->irq, err);
2426                         return err;
2427                 }
2428 
2429                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2430                 q++;
2431         }
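
/* Worked example (queue_mask assumed): with queue_mask = 0x5 (hardware
 * queues 0 and 2), Linux queue 0 uses the legacy MACB_* registers and
 * Linux queue 1 uses GEM_ISR(1), GEM_TBQP(1), etc. for hardware queue 2;
 * its IRQ is then fetched with platform_get_irq(pdev, 1), so the gap at
 * hardware queue 1 never shows up in the device tree.
 */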
2432 
2433         dev->netdev_ops = &macb_netdev_ops;
2434         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2435 
2436         /* set up the appropriate routines according to the adapter type */
2437         if (macb_is_gem(bp)) {
2438                 bp->max_tx_length = GEM_MAX_TX_LEN;
2439                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2440                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2441                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2442                 bp->macbgem_ops.mog_rx = gem_rx;
2443                 dev->ethtool_ops = &gem_ethtool_ops;
2444         } else {
2445                 bp->max_tx_length = MACB_MAX_TX_LEN;
2446                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2447                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2448                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2449                 bp->macbgem_ops.mog_rx = macb_rx;
2450                 dev->ethtool_ops = &macb_ethtool_ops;
2451         }
2452 
2453         /* Set features */
2454         dev->hw_features = NETIF_F_SG;
2455         /* Checksum offload is only available on gem with packet buffer */
2456         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2457                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2458         if (bp->caps & MACB_CAPS_SG_DISABLED)
2459                 dev->hw_features &= ~NETIF_F_SG;
2460         dev->features = dev->hw_features;
2461 
2462         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2463                 val = 0;
2464                 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2465                         val = GEM_BIT(RGMII);
2466                 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2467                          (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2468                         val = MACB_BIT(RMII);
2469                 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2470                         val = MACB_BIT(MII);
2471 
2472                 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2473                         val |= MACB_BIT(CLKEN);
2474 
2475                 macb_or_gem_writel(bp, USRIO, val);
2476         }
2477 
2478         /* Set MII management clock divider */
2479         val = macb_mdc_clk_div(bp);
2480         val |= macb_dbw(bp);
2481         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2482                 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2483         macb_writel(bp, NCFGR, val);
2484 
2485         return 0;
2486 }
2487 
2488 #if defined(CONFIG_OF)
2489 /* 1518 rounded up */
2490 #define AT91ETHER_MAX_RBUFF_SZ  0x600
2491 /* max number of receive buffers */
2492 #define AT91ETHER_MAX_RX_DESCR  9
2493 
2494 /* Initialize and start the Receiver and Transmit subsystems */
2495 static int at91ether_start(struct net_device *dev)
2496 {
2497         struct macb *lp = netdev_priv(dev);
2498         dma_addr_t addr;
2499         u32 ctl;
2500         int i;
2501 
2502         lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2503                                          (AT91ETHER_MAX_RX_DESCR *
2504                                           sizeof(struct macb_dma_desc)),
2505                                          &lp->rx_ring_dma, GFP_KERNEL);
2506         if (!lp->rx_ring)
2507                 return -ENOMEM;
2508 
2509         lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2510                                             AT91ETHER_MAX_RX_DESCR *
2511                                             AT91ETHER_MAX_RBUFF_SZ,
2512                                             &lp->rx_buffers_dma, GFP_KERNEL);
2513         if (!lp->rx_buffers) {
2514                 dma_free_coherent(&lp->pdev->dev,
2515                                   AT91ETHER_MAX_RX_DESCR *
2516                                   sizeof(struct macb_dma_desc),
2517                                   lp->rx_ring, lp->rx_ring_dma);
2518                 lp->rx_ring = NULL;
2519                 return -ENOMEM;
2520         }
2521 
2522         addr = lp->rx_buffers_dma;
2523         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2524                 lp->rx_ring[i].addr = addr;
2525                 lp->rx_ring[i].ctrl = 0;
2526                 addr += AT91ETHER_MAX_RBUFF_SZ;
2527         }
2528 
2529         /* Set the Wrap bit on the last descriptor */
2530         lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2531 
2532         /* Reset buffer index */
2533         lp->rx_tail = 0;
2534 
2535         /* Program address of descriptor list in Rx Buffer Queue register */
2536         macb_writel(lp, RBQP, lp->rx_ring_dma);
2537 
2538         /* Enable Receive and Transmit */
2539         ctl = macb_readl(lp, NCR);
2540         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2541 
2542         return 0;
2543 }
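
/* Resulting RX layout (illustrative): nine fixed descriptors, each
 * pointing into one coherent allocation in AT91ETHER_MAX_RBUFF_SZ
 * (0x600-byte) strides, with the wrap bit on the last entry so the
 * controller loops back to descriptor 0:
 *
 *	rx_ring[0].addr = rx_buffers_dma + 0 * 0x600
 *	rx_ring[1].addr = rx_buffers_dma + 1 * 0x600
 *	...
 *	rx_ring[8].addr = (rx_buffers_dma + 8 * 0x600) | MACB_BIT(RX_WRAP)
 */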
2544 
2545 /* Open the ethernet interface */
2546 static int at91ether_open(struct net_device *dev)
2547 {
2548         struct macb *lp = netdev_priv(dev);
2549         u32 ctl;
2550         int ret;
2551 
2552         /* Clear internal statistics */
2553         ctl = macb_readl(lp, NCR);
2554         macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2555 
2556         macb_set_hwaddr(lp);
2557 
2558         ret = at91ether_start(dev);
2559         if (ret)
2560                 return ret;
2561 
2562         /* Enable MAC interrupts */
2563         macb_writel(lp, IER, MACB_BIT(RCOMP)    |
2564                              MACB_BIT(RXUBR)    |
2565                              MACB_BIT(ISR_TUND) |
2566                              MACB_BIT(ISR_RLE)  |
2567                              MACB_BIT(TCOMP)    |
2568                              MACB_BIT(ISR_ROVR) |
2569                              MACB_BIT(HRESP));
2570 
2571         /* schedule a link state check */
2572         phy_start(dev->phydev);
2573 
2574         netif_start_queue(dev);
2575 
2576         return 0;
2577 }
2578 
2579 /* Close the interface */
2580 static int at91ether_close(struct net_device *dev)
2581 {
2582         struct macb *lp = netdev_priv(dev);
2583         u32 ctl;
2584 
2585         /* Disable Receiver and Transmitter */
2586         ctl = macb_readl(lp, NCR);
2587         macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2588 
2589         /* Disable MAC interrupts */
2590         macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
2591                              MACB_BIT(RXUBR)    |
2592                              MACB_BIT(ISR_TUND) |
2593                              MACB_BIT(ISR_RLE)  |
2594                              MACB_BIT(TCOMP)    |
2595                              MACB_BIT(ISR_ROVR) |
2596                              MACB_BIT(HRESP));
2597 
2598         netif_stop_queue(dev);
2599 
2600         dma_free_coherent(&lp->pdev->dev,
2601                           AT91ETHER_MAX_RX_DESCR *
2602                           sizeof(struct macb_dma_desc),
2603                           lp->rx_ring, lp->rx_ring_dma);
2604         lp->rx_ring = NULL;
2605 
2606         dma_free_coherent(&lp->pdev->dev,
2607                           AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2608                           lp->rx_buffers, lp->rx_buffers_dma);
2609         lp->rx_buffers = NULL;
2610 
2611         return 0;
2612 }
2613 
2614 /* Transmit packet */
2615 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2616 {
2617         struct macb *lp = netdev_priv(dev);
2618 
2619         if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2620                 netif_stop_queue(dev);
2621 
2622                 /* Store packet information (to free when Tx completed) */
2623                 lp->skb = skb;
2624                 lp->skb_length = skb->len;
2625                 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
2626                                                   skb->len, DMA_TO_DEVICE);
2627 
2628                 /* Set address of the data in the Transmit Address register */
2629                 macb_writel(lp, TAR, lp->skb_physaddr);
2630                 /* Set length of the packet in the Transmit Control register */
2631                 macb_writel(lp, TCR, skb->len);
2632 
2633         } else {
2634                 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2635                 return NETDEV_TX_BUSY;
2636         }
2637 
2638         return NETDEV_TX_OK;
2639 }
2640 
2641 /* Extract received frames from the buffer descriptors and send them to the
2642  * upper layers. (Called from interrupt context.)
2643  */
2644 static void at91ether_rx(struct net_device *dev)
2645 {
2646         struct macb *lp = netdev_priv(dev);
2647         unsigned char *p_recv;
2648         struct sk_buff *skb;
2649         unsigned int pktlen;
2650 
2651         while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2652                 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2653                 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2654                 skb = netdev_alloc_skb(dev, pktlen + 2);
2655                 if (skb) {
2656                         skb_reserve(skb, 2);
2657                         memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2658 
2659                         skb->protocol = eth_type_trans(skb, dev);
2660                         lp->stats.rx_packets++;
2661                         lp->stats.rx_bytes += pktlen;
2662                         netif_rx(skb);
2663                 } else {
2664                         lp->stats.rx_dropped++;
2665                 }
2666 
2667                 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2668                         lp->stats.multicast++;
2669 
2670                 /* reset ownership bit */
2671                 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2672 
2673                 /* wrap after last buffer */
2674                 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2675                         lp->rx_tail = 0;
2676                 else
2677                         lp->rx_tail++;
2678         }
2679 }
2680 
2681 /* MAC interrupt handler */
2682 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2683 {
2684         struct net_device *dev = dev_id;
2685         struct macb *lp = netdev_priv(dev);
2686         u32 intstatus, ctl;
2687 
2688         /* MAC Interrupt Status register indicates what interrupts are pending.
2689          * It is automatically cleared once read.
2690          */
2691         intstatus = macb_readl(lp, ISR);
2692 
2693         /* Receive complete */
2694         if (intstatus & MACB_BIT(RCOMP))
2695                 at91ether_rx(dev);
2696 
2697         /* Transmit complete */
2698         if (intstatus & MACB_BIT(TCOMP)) {
2699                 /* The TCOMP bit is set even if the transmission failed */
2700                 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2701                         lp->stats.tx_errors++;
2702 
2703                 if (lp->skb) {
2704                         dev_kfree_skb_irq(lp->skb);
2705                         lp->skb = NULL;
2706                         dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
2707                                          lp->skb_length, DMA_TO_DEVICE);
2708                         lp->stats.tx_packets++;
2709                         lp->stats.tx_bytes += lp->skb_length;
2710                 }
2711                 netif_wake_queue(dev);
2712         }
2713 
2714         /* Work-around for EMAC Errata section 41.3.1 */
2715         if (intstatus & MACB_BIT(RXUBR)) {
2716                 ctl = macb_readl(lp, NCR);
2717                 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2718                 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2719         }
2720 
2721         if (intstatus & MACB_BIT(ISR_ROVR))
2722                 netdev_err(dev, "ROVR error\n");
2723 
2724         return IRQ_HANDLED;
2725 }
2726 
2727 #ifdef CONFIG_NET_POLL_CONTROLLER
2728 static void at91ether_poll_controller(struct net_device *dev)
2729 {
2730         unsigned long flags;
2731 
2732         local_irq_save(flags);
2733         at91ether_interrupt(dev->irq, dev);
2734         local_irq_restore(flags);
2735 }
2736 #endif
2737 
2738 static const struct net_device_ops at91ether_netdev_ops = {
2739         .ndo_open               = at91ether_open,
2740         .ndo_stop               = at91ether_close,
2741         .ndo_start_xmit         = at91ether_start_xmit,
2742         .ndo_get_stats          = macb_get_stats,
2743         .ndo_set_rx_mode        = macb_set_rx_mode,
2744         .ndo_set_mac_address    = eth_mac_addr,
2745         .ndo_do_ioctl           = macb_ioctl,
2746         .ndo_validate_addr      = eth_validate_addr,
2747         .ndo_change_mtu         = eth_change_mtu,
2748 #ifdef CONFIG_NET_POLL_CONTROLLER
2749         .ndo_poll_controller    = at91ether_poll_controller,
2750 #endif
2751 };
2752 
2753 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2754                               struct clk **hclk, struct clk **tx_clk)
2755 {
2756         int err;
2757 
2758         *hclk = NULL;
2759         *tx_clk = NULL;
2760 
2761         *pclk = devm_clk_get(&pdev->dev, "ether_clk");
2762         if (IS_ERR(*pclk))
2763                 return PTR_ERR(*pclk);
2764 
2765         err = clk_prepare_enable(*pclk);
2766         if (err) {
2767                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2768                 return err;
2769         }
2770 
2771         return 0;
2772 }
2773 
2774 static int at91ether_init(struct platform_device *pdev)
2775 {
2776         struct net_device *dev = platform_get_drvdata(pdev);
2777         struct macb *bp = netdev_priv(dev);
2778         int err;
2779         u32 reg;
2780 
2781         dev->netdev_ops = &at91ether_netdev_ops;
2782         dev->ethtool_ops = &macb_ethtool_ops;
2783 
2784         err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2785                                0, dev->name, dev);
2786         if (err)
2787                 return err;
2788 
2789         macb_writel(bp, NCR, 0);
2790 
2791         reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2792         if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2793                 reg |= MACB_BIT(RM9200_RMII);
2794 
2795         macb_writel(bp, NCFGR, reg);
2796 
2797         return 0;
2798 }
2799 
2800 static const struct macb_config at91sam9260_config = {
2801         .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2802         .clk_init = macb_clk_init,
2803         .init = macb_init,
2804 };
2805 
2806 static const struct macb_config pc302gem_config = {
2807         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2808         .dma_burst_length = 16,
2809         .clk_init = macb_clk_init,
2810         .init = macb_init,
2811 };
2812 
2813 static const struct macb_config sama5d2_config = {
2814         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2815         .dma_burst_length = 16,
2816         .clk_init = macb_clk_init,
2817         .init = macb_init,
2818 };
2819 
2820 static const struct macb_config sama5d3_config = {
2821         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
2822               | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2823         .dma_burst_length = 16,
2824         .clk_init = macb_clk_init,
2825         .init = macb_init,
2826 };
2827 
2828 static const struct macb_config sama5d4_config = {
2829         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2830         .dma_burst_length = 4,
2831         .clk_init = macb_clk_init,
2832         .init = macb_init,
2833 };
2834 
2835 static const struct macb_config emac_config = {
2836         .clk_init = at91ether_clk_init,
2837         .init = at91ether_init,
2838 };
2839 
2840 static const struct macb_config np4_config = {
2841         .caps = MACB_CAPS_USRIO_DISABLED,
2842         .clk_init = macb_clk_init,
2843         .init = macb_init,
2844 };
2845 
2846 static const struct macb_config zynqmp_config = {
2847         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
2848         .dma_burst_length = 16,
2849         .clk_init = macb_clk_init,
2850         .init = macb_init,
2851         .jumbo_max_len = 10240,
2852 };
2853 
2854 static const struct macb_config zynq_config = {
2855         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2856         .dma_burst_length = 16,
2857         .clk_init = macb_clk_init,
2858         .init = macb_init,
2859 };
2860 
2861 static const struct of_device_id macb_dt_ids[] = {
2862         { .compatible = "cdns,at32ap7000-macb" },
2863         { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
2864         { .compatible = "cdns,macb" },
2865         { .compatible = "cdns,np4-macb", .data = &np4_config },
2866         { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2867         { .compatible = "cdns,gem", .data = &pc302gem_config },
2868         { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
2869         { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2870         { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2871         { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2872         { .compatible = "cdns,emac", .data = &emac_config },
2873         { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
2874         { .compatible = "cdns,zynq-gem", .data = &zynq_config },
2875         { /* sentinel */ }
2876 };
2877 MODULE_DEVICE_TABLE(of, macb_dt_ids);
2878 #endif /* CONFIG_OF */
2879 
2880 static int macb_probe(struct platform_device *pdev)
2881 {
2882         int (*clk_init)(struct platform_device *, struct clk **,
2883                         struct clk **, struct clk **)
2884                                               = macb_clk_init;
2885         int (*init)(struct platform_device *) = macb_init;
2886         struct device_node *np = pdev->dev.of_node;
2887         struct device_node *phy_node;
2888         const struct macb_config *macb_config = NULL;
2889         struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
2890         unsigned int queue_mask, num_queues;
2891         struct macb_platform_data *pdata;
2892         bool native_io;
2893         struct phy_device *phydev;
2894         struct net_device *dev;
2895         struct resource *regs;
2896         void __iomem *mem;
2897         const char *mac;
2898         struct macb *bp;
2899         int err;
2900 
2901         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2902         mem = devm_ioremap_resource(&pdev->dev, regs);
2903         if (IS_ERR(mem))
2904                 return PTR_ERR(mem);
2905 
2906         if (np) {
2907                 const struct of_device_id *match;
2908 
2909                 match = of_match_node(macb_dt_ids, np);
2910                 if (match && match->data) {
2911                         macb_config = match->data;
2912                         clk_init = macb_config->clk_init;
2913                         init = macb_config->init;
2914                 }
2915         }
2916 
2917         err = clk_init(pdev, &pclk, &hclk, &tx_clk);
2918         if (err)
2919                 return err;
2920 
2921         native_io = hw_is_native_io(mem);
2922 
2923         macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2924         dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2925         if (!dev) {
2926                 err = -ENOMEM;
2927                 goto err_disable_clocks;
2928         }
2929 
2930         dev->base_addr = regs->start;
2931 
2932         SET_NETDEV_DEV(dev, &pdev->dev);
2933 
2934         bp = netdev_priv(dev);
2935         bp->pdev = pdev;
2936         bp->dev = dev;
2937         bp->regs = mem;
2938         bp->native_io = native_io;
2939         if (native_io) {
2940                 bp->macb_reg_readl = hw_readl_native;
2941                 bp->macb_reg_writel = hw_writel_native;
2942         } else {
2943                 bp->macb_reg_readl = hw_readl;
2944                 bp->macb_reg_writel = hw_writel;
2945         }
2946         bp->num_queues = num_queues;
2947         bp->queue_mask = queue_mask;
2948         if (macb_config)
2949                 bp->dma_burst_length = macb_config->dma_burst_length;
2950         bp->pclk = pclk;
2951         bp->hclk = hclk;
2952         bp->tx_clk = tx_clk;
2953         if (macb_config)
2954                 bp->jumbo_max_len = macb_config->jumbo_max_len;
2955 
2956         bp->wol = 0;
2957         if (of_get_property(np, "magic-packet", NULL))
2958                 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
2959         device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
2960 
2961         spin_lock_init(&bp->lock);
2962 
2963         /* setup capabilities */
2964         macb_configure_caps(bp, macb_config);
2965 
2966         platform_set_drvdata(pdev, dev);
2967 
2968         dev->irq = platform_get_irq(pdev, 0);
2969         if (dev->irq < 0) {
2970                 err = dev->irq;
2971                 goto err_out_free_netdev;
2972         }
2973 
2974         mac = of_get_mac_address(np);
2975         if (mac)
2976                 ether_addr_copy(bp->dev->dev_addr, mac);
2977         else
2978                 macb_get_hwaddr(bp);
2979 
2980         /* Power up the PHY if there is a GPIO reset */
2981         phy_node = of_get_next_available_child(np, NULL);
2982         if (phy_node) {
2983                 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
2984 
2985                 if (gpio_is_valid(gpio)) {
2986                         bp->reset_gpio = gpio_to_desc(gpio);
2987                         gpiod_direction_output(bp->reset_gpio, 1);
2988                 }
2989         }
2990         of_node_put(phy_node);
2991 
2992         err = of_get_phy_mode(np);
2993         if (err < 0) {
2994                 pdata = dev_get_platdata(&pdev->dev);
2995                 if (pdata && pdata->is_rmii)
2996                         bp->phy_interface = PHY_INTERFACE_MODE_RMII;
2997                 else
2998                         bp->phy_interface = PHY_INTERFACE_MODE_MII;
2999         } else {
3000                 bp->phy_interface = err;
3001         }
3002 
3003         /* IP specific init */
3004         err = init(pdev);
3005         if (err)
3006                 goto err_out_free_netdev;
3007 
3008         err = macb_mii_init(bp);
3009         if (err)
3010                 goto err_out_free_netdev;
3011 
3012         phydev = dev->phydev;
3013 
3014         netif_carrier_off(dev);
3015 
3016         err = register_netdev(dev);
3017         if (err) {
3018                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3019                 goto err_out_unregister_mdio;
3020         }
3021 
3022         phy_attached_info(phydev);
3023 
3024         netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3025                     macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3026                     dev->base_addr, dev->irq, dev->dev_addr);
3027 
3028         return 0;
3029 
3030 err_out_unregister_mdio:
3031         phy_disconnect(dev->phydev);
3032         mdiobus_unregister(bp->mii_bus);
3033         mdiobus_free(bp->mii_bus);
3034 
3035         /* Shutdown the PHY if there is a GPIO reset */
3036         if (bp->reset_gpio)
3037                 gpiod_set_value(bp->reset_gpio, 0);
3038 
3039 err_out_free_netdev:
3040         free_netdev(dev);
3041 
3042 err_disable_clocks:
3043         clk_disable_unprepare(tx_clk);
3044         clk_disable_unprepare(hclk);
3045         clk_disable_unprepare(pclk);
3046 
3047         return err;
3048 }
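
/* Editor's aside (not in the original source): a sketch of the contract
 * macb_probe() implies for its clk_init callback.  Because the
 * err_disable_clocks path above (and macb_remove() below) call
 * clk_disable_unprepare() on all three clocks, the callback must return
 * with every clock it hands back prepared and enabled; handing back a
 * NULL tx_clk is fine, since the clk API accepts NULL.  The helper name
 * and clock-name strings here are illustrative only.
 */
static int example_clk_init(struct platform_device *pdev, struct clk **pclk,
                            struct clk **hclk, struct clk **tx_clk)
{
        int err;

        *pclk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(*pclk))
                return PTR_ERR(*pclk);

        *hclk = devm_clk_get(&pdev->dev, "hclk");
        if (IS_ERR(*hclk))
                return PTR_ERR(*hclk);

        /* treat the TX clock as optional */
        *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
        if (IS_ERR(*tx_clk))
                *tx_clk = NULL;

        err = clk_prepare_enable(*pclk);
        if (err)
                return err;

        err = clk_prepare_enable(*hclk);
        if (err)
                goto err_disable_pclk;

        err = clk_prepare_enable(*tx_clk);
        if (err)
                goto err_disable_hclk;

        return 0;

err_disable_hclk:
        clk_disable_unprepare(*hclk);
err_disable_pclk:
        clk_disable_unprepare(*pclk);
        return err;
}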
3049 
3050 static int macb_remove(struct platform_device *pdev)
3051 {
3052         struct net_device *dev;
3053         struct macb *bp;
3054 
3055         dev = platform_get_drvdata(pdev);
3056 
3057         if (dev) {
3058                 bp = netdev_priv(dev);
3059                 if (dev->phydev)
3060                         phy_disconnect(dev->phydev);
3061                 mdiobus_unregister(bp->mii_bus);
3062                 mdiobus_free(bp->mii_bus);
3063 
3064                 /* Shutdown the PHY if there is a GPIO reset */
3065                 if (bp->reset_gpio)
3066                         gpiod_set_value(bp->reset_gpio, 0);
3067 
3068                 unregister_netdev(dev);
3069                 clk_disable_unprepare(bp->tx_clk);
3070                 clk_disable_unprepare(bp->hclk);
3071                 clk_disable_unprepare(bp->pclk);
3072                 free_netdev(dev);
3073         }
3074 
3075         return 0;
3076 }
3077 
3078 static int __maybe_unused macb_suspend(struct device *dev)
3079 {
3080         struct platform_device *pdev = to_platform_device(dev);
3081         struct net_device *netdev = platform_get_drvdata(pdev);
3082         struct macb *bp = netdev_priv(netdev);
3083 
3084         netif_carrier_off(netdev);
3085         netif_device_detach(netdev);
3086 
3087         if (bp->wol & MACB_WOL_ENABLED) {
3088                 macb_writel(bp, IER, MACB_BIT(WOL));
3089                 macb_writel(bp, WOL, MACB_BIT(MAG));
3090                 enable_irq_wake(bp->queues[0].irq);
3091         } else {
3092                 clk_disable_unprepare(bp->tx_clk);
3093                 clk_disable_unprepare(bp->hclk);
3094                 clk_disable_unprepare(bp->pclk);
3095         }
3096 
3097         return 0;
3098 }
3099 
3100 static int __maybe_unused macb_resume(struct device *dev)
3101 {
3102         struct platform_device *pdev = to_platform_device(dev);
3103         struct net_device *netdev = platform_get_drvdata(pdev);
3104         struct macb *bp = netdev_priv(netdev);
3105 
3106         if (bp->wol & MACB_WOL_ENABLED) {
3107                 macb_writel(bp, IDR, MACB_BIT(WOL));
3108                 macb_writel(bp, WOL, 0);
3109                 disable_irq_wake(bp->queues[0].irq);
3110         } else {
3111                 clk_prepare_enable(bp->pclk);
3112                 clk_prepare_enable(bp->hclk);
3113                 clk_prepare_enable(bp->tx_clk);
3114         }
3115 
3116         netif_device_attach(netdev);
3117 
3118         return 0;
3119 }
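
/* Editor's aside (not in the original source): the suspend/resume pair
 * above only arms magic-packet wakeup when MACB_WOL_ENABLED is set in
 * bp->wol.  Below is a sketch of the ethtool set_wol handler that would
 * set that flag, assuming the hardware advertised magic-packet support
 * via the "magic-packet" DT property parsed in macb_probe().  The
 * function name is hypothetical.
 */
static int example_set_wol(struct net_device *netdev,
                           struct ethtool_wolinfo *wol)
{
        struct macb *bp = netdev_priv(netdev);

        /* only magic-packet wake is meaningful here, and only if advertised */
        if ((wol->wolopts & ~WAKE_MAGIC) ||
            !(bp->wol & MACB_WOL_HAS_MAGIC_PACKET))
                return -EOPNOTSUPP;

        if (wol->wolopts & WAKE_MAGIC)
                bp->wol |= MACB_WOL_ENABLED;
        else
                bp->wol &= ~MACB_WOL_ENABLED;

        device_set_wakeup_enable(&bp->pdev->dev,
                                 bp->wol & MACB_WOL_ENABLED);

        return 0;
}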
3120 
3121 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
3122 
3123 static struct platform_driver macb_driver = {
3124         .probe          = macb_probe,
3125         .remove         = macb_remove,
3126         .driver         = {
3127                 .name           = "macb",
3128                 .of_match_table = of_match_ptr(macb_dt_ids),
3129                 .pm     = &macb_pm_ops,
3130         },
3131 };
3132 
3133 module_platform_driver(macb_driver);
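
/* Editor's aside (not in the original source): module_platform_driver()
 * above expands to boilerplate equivalent to the following, registering
 * the driver at module load and unregistering it at unload:
 */
static int __init macb_driver_init(void)
{
        return platform_driver_register(&macb_driver);
}
module_init(macb_driver_init);

static void __exit macb_driver_exit(void)
{
        platform_driver_unregister(&macb_driver);
}
module_exit(macb_driver_exit);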
3134 
3135 MODULE_LICENSE("GPL");
3136 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
3137 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
3138 MODULE_ALIAS("platform:macb");
3139 
