Linux/drivers/net/ethernet/cadence/macb.c

  1 /*
  2  * Cadence MACB/GEM Ethernet Controller driver
  3  *
  4  * Copyright (C) 2004-2006 Atmel Corporation
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 #include <linux/clk.h>
 13 #include <linux/module.h>
 14 #include <linux/moduleparam.h>
 15 #include <linux/kernel.h>
 16 #include <linux/types.h>
 17 #include <linux/circ_buf.h>
 18 #include <linux/slab.h>
 19 #include <linux/init.h>
 20 #include <linux/io.h>
 21 #include <linux/gpio.h>
 22 #include <linux/gpio/consumer.h>
 23 #include <linux/interrupt.h>
 24 #include <linux/netdevice.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/dma-mapping.h>
 27 #include <linux/platform_data/macb.h>
 28 #include <linux/platform_device.h>
 29 #include <linux/phy.h>
 30 #include <linux/of.h>
 31 #include <linux/of_device.h>
 32 #include <linux/of_gpio.h>
 33 #include <linux/of_mdio.h>
 34 #include <linux/of_net.h>
 35 
 36 #include "macb.h"
 37 
 38 #define MACB_RX_BUFFER_SIZE     128
 39 #define RX_BUFFER_MULTIPLE      64  /* bytes */
 40 #define RX_RING_SIZE            512 /* must be power of 2 */
 41 #define RX_RING_BYTES           (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 42 
 43 #define TX_RING_SIZE            128 /* must be power of 2 */
 44 #define TX_RING_BYTES           (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 45 
 46 /* level of occupied TX descriptors under which we wake up TX process */
 47 #define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)
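    /* with TX_RING_SIZE = 128 this evaluates to 96 occupied descriptors */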
 48 
 49 #define MACB_RX_INT_FLAGS       (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
 50                                  | MACB_BIT(ISR_ROVR))
 51 #define MACB_TX_ERR_FLAGS       (MACB_BIT(ISR_TUND)                     \
 52                                         | MACB_BIT(ISR_RLE)             \
 53                                         | MACB_BIT(TXERR))
 54 #define MACB_TX_INT_FLAGS       (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 55 
 56 #define MACB_MAX_TX_LEN         ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 57 #define GEM_MAX_TX_LEN          ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
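    /* i.e. the largest frame length that fits in the descriptor's frame-length field */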
 58 
 59 #define GEM_MTU_MIN_SIZE        68
 60 
 61 #define MACB_WOL_HAS_MAGIC_PACKET       (0x1 << 0)
 62 #define MACB_WOL_ENABLED                (0x1 << 1)
 63 
 64 /* Graceful stop timeouts in us. We should allow up to
 65  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 66  */
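    /* 1518 bytes * 8 bits / 10 Mbit/s ~= 1215 us for a worst-case frame */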
 67 #define MACB_HALT_TIMEOUT       1230
 68 
 69 /* Ring buffer accessors */
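    /* TX_RING_SIZE and RX_RING_SIZE are powers of two, so "index & (SIZE - 1)" wraps an index cheaply */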
 70 static unsigned int macb_tx_ring_wrap(unsigned int index)
 71 {
 72         return index & (TX_RING_SIZE - 1);
 73 }
 74 
 75 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 76                                           unsigned int index)
 77 {
 78         return &queue->tx_ring[macb_tx_ring_wrap(index)];
 79 }
 80 
 81 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 82                                        unsigned int index)
 83 {
 84         return &queue->tx_skb[macb_tx_ring_wrap(index)];
 85 }
 86 
 87 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 88 {
 89         dma_addr_t offset;
 90 
 91         offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
 92 
 93         return queue->tx_ring_dma + offset;
 94 }
 95 
 96 static unsigned int macb_rx_ring_wrap(unsigned int index)
 97 {
 98         return index & (RX_RING_SIZE - 1);
 99 }
100 
101 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
102 {
103         return &bp->rx_ring[macb_rx_ring_wrap(index)];
104 }
105 
106 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
107 {
108         return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
109 }
110 
111 /* I/O accessors */
112 static u32 hw_readl_native(struct macb *bp, int offset)
113 {
114         return __raw_readl(bp->regs + offset);
115 }
116 
117 static void hw_writel_native(struct macb *bp, int offset, u32 value)
118 {
119         __raw_writel(value, bp->regs + offset);
120 }
121 
122 static u32 hw_readl(struct macb *bp, int offset)
123 {
124         return readl_relaxed(bp->regs + offset);
125 }
126 
127 static void hw_writel(struct macb *bp, int offset, u32 value)
128 {
129         writel_relaxed(value, bp->regs + offset);
130 }
131 
132 /* Find the CPU endianness by using the loopback bit of the NCR register.
133  * When the CPU is big endian, we need to program swapped mode for
134  * management descriptor access.
135  */
136 static bool hw_is_native_io(void __iomem *addr)
137 {
138         u32 value = MACB_BIT(LLB);
139 
140         __raw_writel(value, addr + MACB_NCR);
141         value = __raw_readl(addr + MACB_NCR);
142 
143         /* Write 0 back to disable everything */
144         __raw_writel(0, addr + MACB_NCR);
145 
146         return value == MACB_BIT(LLB);
147 }
148 
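    /* GEM cores report a module ID (IDNUM) of 0x2 or higher in the MID
     * register; plain MACB cores report a lower value.
     */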
149 static bool hw_is_gem(void __iomem *addr, bool native_io)
150 {
151         u32 id;
152 
153         if (native_io)
154                 id = __raw_readl(addr + MACB_MID);
155         else
156                 id = readl_relaxed(addr + MACB_MID);
157 
158         return MACB_BFEXT(IDNUM, id) >= 0x2;
159 }
160 
161 static void macb_set_hwaddr(struct macb *bp)
162 {
163         u32 bottom;
164         u16 top;
165 
166         bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
167         macb_or_gem_writel(bp, SA1B, bottom);
168         top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
169         macb_or_gem_writel(bp, SA1T, top);
170 
171         /* Clear unused address register sets */
172         macb_or_gem_writel(bp, SA2B, 0);
173         macb_or_gem_writel(bp, SA2T, 0);
174         macb_or_gem_writel(bp, SA3B, 0);
175         macb_or_gem_writel(bp, SA3T, 0);
176         macb_or_gem_writel(bp, SA4B, 0);
177         macb_or_gem_writel(bp, SA4T, 0);
178 }
179 
180 static void macb_get_hwaddr(struct macb *bp)
181 {
182         struct macb_platform_data *pdata;
183         u32 bottom;
184         u16 top;
185         u8 addr[6];
186         int i;
187 
188         pdata = dev_get_platdata(&bp->pdev->dev);
189 
190         /* Check all 4 address register sets for a valid address */
191         for (i = 0; i < 4; i++) {
192                 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
193                 top = macb_or_gem_readl(bp, SA1T + i * 8);
194 
195                 if (pdata && pdata->rev_eth_addr) {
196                         addr[5] = bottom & 0xff;
197                         addr[4] = (bottom >> 8) & 0xff;
198                         addr[3] = (bottom >> 16) & 0xff;
199                         addr[2] = (bottom >> 24) & 0xff;
200                         addr[1] = top & 0xff;
201                         addr[0] = (top & 0xff00) >> 8;
202                 } else {
203                         addr[0] = bottom & 0xff;
204                         addr[1] = (bottom >> 8) & 0xff;
205                         addr[2] = (bottom >> 16) & 0xff;
206                         addr[3] = (bottom >> 24) & 0xff;
207                         addr[4] = top & 0xff;
208                         addr[5] = (top >> 8) & 0xff;
209                 }
210 
211                 if (is_valid_ether_addr(addr)) {
212                         memcpy(bp->dev->dev_addr, addr, sizeof(addr));
213                         return;
214                 }
215         }
216 
217         dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
218         eth_hw_addr_random(bp->dev);
219 }
220 
221 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
222 {
223         struct macb *bp = bus->priv;
224         int value;
225 
226         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
227                               | MACB_BF(RW, MACB_MAN_READ)
228                               | MACB_BF(PHYA, mii_id)
229                               | MACB_BF(REGA, regnum)
230                               | MACB_BF(CODE, MACB_MAN_CODE)));
231 
232         /* wait for end of transfer */
233         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
234                 cpu_relax();
235 
236         value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
237 
238         return value;
239 }
240 
241 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
242                            u16 value)
243 {
244         struct macb *bp = bus->priv;
245 
246         macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
247                               | MACB_BF(RW, MACB_MAN_WRITE)
248                               | MACB_BF(PHYA, mii_id)
249                               | MACB_BF(REGA, regnum)
250                               | MACB_BF(CODE, MACB_MAN_CODE)
251                               | MACB_BF(DATA, value)));
252 
253         /* wait for end of transfer */
254         while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
255                 cpu_relax();
256 
257         return 0;
258 }
259 
260 /**
261  * macb_set_tx_clk() - Set the TX clock rate to match the link speed
262  * @clk:        Pointer to the clock to change
263  * @speed:      Link speed (SPEED_10, SPEED_100 or SPEED_1000)
264  * @dev:        Pointer to the struct net_device
265  */
266 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
267 {
268         long ferr, rate, rate_rounded;
269 
270         if (!clk)
271                 return;
272 
273         switch (speed) {
274         case SPEED_10:
275                 rate = 2500000;
276                 break;
277         case SPEED_100:
278                 rate = 25000000;
279                 break;
280         case SPEED_1000:
281                 rate = 125000000;
282                 break;
283         default:
284                 return;
285         }
286 
287         rate_rounded = clk_round_rate(clk, rate);
288         if (rate_rounded < 0)
289                 return;
290 
291         /* RGMII allows 50 ppm frequency error. Test and warn if this limit
292          * is not satisfied.
293          */
294         ferr = abs(rate_rounded - rate);
295         ferr = DIV_ROUND_UP(ferr, rate / 100000);
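                /* one unit of ferr is rate / 100000, i.e. 10 ppm, so 5 units is the 50 ppm budget */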
296         if (ferr > 5)
297                 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
298                             rate);
299 
300         if (clk_set_rate(clk, rate_rounded))
301                 netdev_err(dev, "adjusting tx_clk failed.\n");
302 }
303 
304 static void macb_handle_link_change(struct net_device *dev)
305 {
306         struct macb *bp = netdev_priv(dev);
307         struct phy_device *phydev = dev->phydev;
308         unsigned long flags;
309         int status_change = 0;
310 
311         spin_lock_irqsave(&bp->lock, flags);
312 
313         if (phydev->link) {
314                 if ((bp->speed != phydev->speed) ||
315                     (bp->duplex != phydev->duplex)) {
316                         u32 reg;
317 
318                         reg = macb_readl(bp, NCFGR);
319                         reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
320                         if (macb_is_gem(bp))
321                                 reg &= ~GEM_BIT(GBE);
322 
323                         if (phydev->duplex)
324                                 reg |= MACB_BIT(FD);
325                         if (phydev->speed == SPEED_100)
326                                 reg |= MACB_BIT(SPD);
327                         if (phydev->speed == SPEED_1000 &&
328                             bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
329                                 reg |= GEM_BIT(GBE);
330 
331                         macb_or_gem_writel(bp, NCFGR, reg);
332 
333                         bp->speed = phydev->speed;
334                         bp->duplex = phydev->duplex;
335                         status_change = 1;
336                 }
337         }
338 
339         if (phydev->link != bp->link) {
340                 if (!phydev->link) {
341                         bp->speed = 0;
342                         bp->duplex = -1;
343                 }
344                 bp->link = phydev->link;
345 
346                 status_change = 1;
347         }
348 
349         spin_unlock_irqrestore(&bp->lock, flags);
350 
351         if (status_change) {
352                 if (phydev->link) {
353                         /* Update the TX clock rate if and only if the link is
354                          * up and there has been a link change.
355                          */
356                         macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
357 
358                         netif_carrier_on(dev);
359                         netdev_info(dev, "link up (%d/%s)\n",
360                                     phydev->speed,
361                                     phydev->duplex == DUPLEX_FULL ?
362                                     "Full" : "Half");
363                 } else {
364                         netif_carrier_off(dev);
365                         netdev_info(dev, "link down\n");
366                 }
367         }
368 }
369 
370 /* based on au1000_eth.c */
371 static int macb_mii_probe(struct net_device *dev)
372 {
373         struct macb *bp = netdev_priv(dev);
374         struct macb_platform_data *pdata;
375         struct phy_device *phydev;
376         int phy_irq;
377         int ret;
378 
379         phydev = phy_find_first(bp->mii_bus);
380         if (!phydev) {
381                 netdev_err(dev, "no PHY found\n");
382                 return -ENXIO;
383         }
384 
385         pdata = dev_get_platdata(&bp->pdev->dev);
386         if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
387                 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
388                                         "phy int");
389                 if (!ret) {
390                         phy_irq = gpio_to_irq(pdata->phy_irq_pin);
391                         phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
392                 }
393         }
394 
395         /* attach the mac to the phy */
396         ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
397                                  bp->phy_interface);
398         if (ret) {
399                 netdev_err(dev, "Could not attach to PHY\n");
400                 return ret;
401         }
402 
403         /* mask with MAC supported features */
404         if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
405                 phydev->supported &= PHY_GBIT_FEATURES;
406         else
407                 phydev->supported &= PHY_BASIC_FEATURES;
408 
409         if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
410                 phydev->supported &= ~SUPPORTED_1000baseT_Half;
411 
412         phydev->advertising = phydev->supported;
413 
414         bp->link = 0;
415         bp->speed = 0;
416         bp->duplex = -1;
417 
418         return 0;
419 }
420 
421 static int macb_mii_init(struct macb *bp)
422 {
423         struct macb_platform_data *pdata;
424         struct device_node *np;
425         int err = -ENXIO, i;
426 
427         /* Enable management port */
428         macb_writel(bp, NCR, MACB_BIT(MPE));
429 
430         bp->mii_bus = mdiobus_alloc();
431         if (!bp->mii_bus) {
432                 err = -ENOMEM;
433                 goto err_out;
434         }
435 
436         bp->mii_bus->name = "MACB_mii_bus";
437         bp->mii_bus->read = &macb_mdio_read;
438         bp->mii_bus->write = &macb_mdio_write;
439         snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
440                  bp->pdev->name, bp->pdev->id);
441         bp->mii_bus->priv = bp;
442         bp->mii_bus->parent = &bp->pdev->dev;
443         pdata = dev_get_platdata(&bp->pdev->dev);
444 
445         dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
446 
447         np = bp->pdev->dev.of_node;
448         if (np) {
449                 /* try dt phy registration */
450                 err = of_mdiobus_register(bp->mii_bus, np);
451 
452                 /* fall back to standard PHY registration if no PHY was
453                  * found during the DT PHY registration
454                  */
455                 if (!err && !phy_find_first(bp->mii_bus)) {
456                         for (i = 0; i < PHY_MAX_ADDR; i++) {
457                                 struct phy_device *phydev;
458 
459                                 phydev = mdiobus_scan(bp->mii_bus, i);
460                                 if (IS_ERR(phydev) &&
461                                     PTR_ERR(phydev) != -ENODEV) {
462                                         err = PTR_ERR(phydev);
463                                         break;
464                                 }
465                         }
466 
467                         if (err)
468                                 goto err_out_unregister_bus;
469                 }
470         } else {
471                 if (pdata)
472                         bp->mii_bus->phy_mask = pdata->phy_mask;
473 
474                 err = mdiobus_register(bp->mii_bus);
475         }
476 
477         if (err)
478                 goto err_out_free_mdiobus;
479 
480         err = macb_mii_probe(bp->dev);
481         if (err)
482                 goto err_out_unregister_bus;
483 
484         return 0;
485 
486 err_out_unregister_bus:
487         mdiobus_unregister(bp->mii_bus);
488 err_out_free_mdiobus:
489         mdiobus_free(bp->mii_bus);
490 err_out:
491         return err;
492 }
493 
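    /* Accumulate the MACB hardware statistics: p walks the hw_stats mirror
     * while offset walks the register block from PFR up to and including TPF.
     */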
494 static void macb_update_stats(struct macb *bp)
495 {
496         u32 *p = &bp->hw_stats.macb.rx_pause_frames;
497         u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
498         int offset = MACB_PFR;
499 
500         WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
501 
502         for (; p < end; p++, offset += 4)
503                 *p += bp->macb_reg_readl(bp, offset);
504 }
505 
506 static int macb_halt_tx(struct macb *bp)
507 {
508         unsigned long   halt_time, timeout;
509         u32             status;
510 
511         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
512 
513         timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
514         do {
515                 halt_time = jiffies;
516                 status = macb_readl(bp, TSR);
517                 if (!(status & MACB_BIT(TGO)))
518                         return 0;
519 
520                 usleep_range(10, 250);
521         } while (time_before(halt_time, timeout));
522 
523         return -ETIMEDOUT;
524 }
525 
526 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
527 {
528         if (tx_skb->mapping) {
529                 if (tx_skb->mapped_as_page)
530                         dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
531                                        tx_skb->size, DMA_TO_DEVICE);
532                 else
533                         dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
534                                          tx_skb->size, DMA_TO_DEVICE);
535                 tx_skb->mapping = 0;
536         }
537 
538         if (tx_skb->skb) {
539                 dev_kfree_skb_any(tx_skb->skb);
540                 tx_skb->skb = NULL;
541         }
542 }
543 
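    /* Store a buffer address in a descriptor; with 64-bit DMA addressing the
     * upper 32 bits go into the descriptor's addrh word.
     */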
544 static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
545 {
546         desc->addr = (u32)addr;
547 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
548         desc->addrh = (u32)(addr >> 32);
549 #endif
550 }
551 
552 static void macb_tx_error_task(struct work_struct *work)
553 {
554         struct macb_queue       *queue = container_of(work, struct macb_queue,
555                                                       tx_error_task);
556         struct macb             *bp = queue->bp;
557         struct macb_tx_skb      *tx_skb;
558         struct macb_dma_desc    *desc;
559         struct sk_buff          *skb;
560         unsigned int            tail;
561         unsigned long           flags;
562 
563         netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
564                     (unsigned int)(queue - bp->queues),
565                     queue->tx_tail, queue->tx_head);
566 
567         /* Prevent the queue IRQ handlers from running: each of them may call
568          * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
569          * As explained below, we have to halt the transmission before updating
570          * the TBQP registers, so we call netif_tx_stop_all_queues() to keep
571          * the networking core from queuing new packets while the macb/gem is halted.
572          */
573         spin_lock_irqsave(&bp->lock, flags);
574 
575         /* Make sure nobody is trying to queue up new packets */
576         netif_tx_stop_all_queues(bp->dev);
577 
578         /* Stop transmission now
579          * (in case we have just queued new packets)
580          * macb/gem must be halted to write TBQP register
581          */
582         if (macb_halt_tx(bp))
583                 /* Just complain for now, reinitializing TX path can be good */
584                 netdev_err(bp->dev, "BUG: halt tx timed out\n");
585 
586         /* Treat frames in TX queue including the ones that caused the error.
587          * Free transmit buffers in upper layer.
588          */
589         for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
590                 u32     ctrl;
591 
592                 desc = macb_tx_desc(queue, tail);
593                 ctrl = desc->ctrl;
594                 tx_skb = macb_tx_skb(queue, tail);
595                 skb = tx_skb->skb;
596 
597                 if (ctrl & MACB_BIT(TX_USED)) {
598                         /* skb is set for the last buffer of the frame */
599                         while (!skb) {
600                                 macb_tx_unmap(bp, tx_skb);
601                                 tail++;
602                                 tx_skb = macb_tx_skb(queue, tail);
603                                 skb = tx_skb->skb;
604                         }
605 
606                         /* ctrl still refers to the first buffer descriptor
607                          * since it's the only one written back by the hardware
608                          */
609                         if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
610                                 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
611                                             macb_tx_ring_wrap(tail), skb->data);
612                                 bp->stats.tx_packets++;
613                                 bp->stats.tx_bytes += skb->len;
614                         }
615                 } else {
616                         /* "Buffers exhausted mid-frame" errors may only happen
617                          * if the driver is buggy, so complain loudly about
618                          * those. Statistics are updated by hardware.
619                          */
620                         if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
621                                 netdev_err(bp->dev,
622                                            "BUG: TX buffers exhausted mid-frame\n");
623 
624                         desc->ctrl = ctrl | MACB_BIT(TX_USED);
625                 }
626 
627                 macb_tx_unmap(bp, tx_skb);
628         }
629 
630         /* Set end of TX queue */
631         desc = macb_tx_desc(queue, 0);
632         macb_set_addr(desc, 0);
633         desc->ctrl = MACB_BIT(TX_USED);
634 
635         /* Make descriptor updates visible to hardware */
636         wmb();
637 
638         /* Reinitialize the TX desc queue */
639         queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
640 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
641         queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
642 #endif
643         /* Make TX ring reflect state of hardware */
644         queue->tx_head = 0;
645         queue->tx_tail = 0;
646 
647         /* Housework before enabling TX IRQ */
648         macb_writel(bp, TSR, macb_readl(bp, TSR));
649         queue_writel(queue, IER, MACB_TX_INT_FLAGS);
650 
651         /* Now we are ready to start transmission again */
652         netif_tx_start_all_queues(bp->dev);
653         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
654 
655         spin_unlock_irqrestore(&bp->lock, flags);
656 }
657 
658 static void macb_tx_interrupt(struct macb_queue *queue)
659 {
660         unsigned int tail;
661         unsigned int head;
662         u32 status;
663         struct macb *bp = queue->bp;
664         u16 queue_index = queue - bp->queues;
665 
666         status = macb_readl(bp, TSR);
667         macb_writel(bp, TSR, status);
668 
669         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
670                 queue_writel(queue, ISR, MACB_BIT(TCOMP));
671 
672         netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
673                     (unsigned long)status);
674 
675         head = queue->tx_head;
676         for (tail = queue->tx_tail; tail != head; tail++) {
677                 struct macb_tx_skb      *tx_skb;
678                 struct sk_buff          *skb;
679                 struct macb_dma_desc    *desc;
680                 u32                     ctrl;
681 
682                 desc = macb_tx_desc(queue, tail);
683 
684                 /* Make hw descriptor updates visible to CPU */
685                 rmb();
686 
687                 ctrl = desc->ctrl;
688 
689                 /* TX_USED bit is only set by hardware on the very first buffer
690                  * descriptor of the transmitted frame.
691                  */
692                 if (!(ctrl & MACB_BIT(TX_USED)))
693                         break;
694 
695                 /* Process all buffers of the current transmitted frame */
696                 for (;; tail++) {
697                         tx_skb = macb_tx_skb(queue, tail);
698                         skb = tx_skb->skb;
699 
700                         /* First, update TX stats if needed */
701                         if (skb) {
702                                 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
703                                             macb_tx_ring_wrap(tail), skb->data);
704                                 bp->stats.tx_packets++;
705                                 bp->stats.tx_bytes += skb->len;
706                         }
707 
708                         /* Now we can safely release resources */
709                         macb_tx_unmap(bp, tx_skb);
710 
711                         /* skb is set only for the last buffer of the frame.
712                          * WARNING: at this point skb has been freed by
713                          * macb_tx_unmap().
714                          */
715                         if (skb)
716                                 break;
717                 }
718         }
719 
720         queue->tx_tail = tail;
721         if (__netif_subqueue_stopped(bp->dev, queue_index) &&
722             CIRC_CNT(queue->tx_head, queue->tx_tail,
723                      TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
724                 netif_wake_subqueue(bp->dev, queue_index);
725 }
726 
727 static void gem_rx_refill(struct macb *bp)
728 {
729         unsigned int            entry;
730         struct sk_buff          *skb;
731         dma_addr_t              paddr;
732 
733         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
734                           RX_RING_SIZE) > 0) {
735                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
736 
737                 /* Make hw descriptor updates visible to CPU */
738                 rmb();
739 
740                 bp->rx_prepared_head++;
741 
742                 if (!bp->rx_skbuff[entry]) {
743                         /* allocate sk_buff for this free entry in ring */
744                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
745                         if (unlikely(!skb)) {
746                                 netdev_err(bp->dev,
747                                            "Unable to allocate sk_buff\n");
748                                 break;
749                         }
750 
751                         /* now fill corresponding descriptor entry */
752                         paddr = dma_map_single(&bp->pdev->dev, skb->data,
753                                                bp->rx_buffer_size,
754                                                DMA_FROM_DEVICE);
755                         if (dma_mapping_error(&bp->pdev->dev, paddr)) {
756                                 dev_kfree_skb(skb);
757                                 break;
758                         }
759 
760                         bp->rx_skbuff[entry] = skb;
761 
762                         if (entry == RX_RING_SIZE - 1)
763                                 paddr |= MACB_BIT(RX_WRAP);
764                         macb_set_addr(&(bp->rx_ring[entry]), paddr);
765                         bp->rx_ring[entry].ctrl = 0;
766 
767                         /* properly align Ethernet header */
768                         skb_reserve(skb, NET_IP_ALIGN);
769                 } else {
770                         bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
771                         bp->rx_ring[entry].ctrl = 0;
772                 }
773         }
774 
775         /* Make descriptor updates visible to hardware */
776         wmb();
777 
778         netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
779                     bp->rx_prepared_head, bp->rx_tail);
780 }
781 
782 /* Mark DMA descriptors from begin up to and not including end as unused */
783 static void discard_partial_frame(struct macb *bp, unsigned int begin,
784                                   unsigned int end)
785 {
786         unsigned int frag;
787 
788         for (frag = begin; frag != end; frag++) {
789                 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
790 
791                 desc->addr &= ~MACB_BIT(RX_USED);
792         }
793 
794         /* Make descriptor updates visible to hardware */
795         wmb();
796 
797         /* When this happens, the hardware stats registers for
798          * whatever caused this are updated, so we don't have to record
799          * anything.
800          */
801 }
802 
803 static int gem_rx(struct macb *bp, int budget)
804 {
805         unsigned int            len;
806         unsigned int            entry;
807         struct sk_buff          *skb;
808         struct macb_dma_desc    *desc;
809         int                     count = 0;
810 
811         while (count < budget) {
812                 u32 ctrl;
813                 dma_addr_t addr;
814                 bool rxused;
815 
816                 entry = macb_rx_ring_wrap(bp->rx_tail);
817                 desc = &bp->rx_ring[entry];
818 
819                 /* Make hw descriptor updates visible to CPU */
820                 rmb();
821 
822                 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
823                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
824 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
825                 addr |= ((u64)(desc->addrh) << 32);
826 #endif
827                 ctrl = desc->ctrl;
828 
829                 if (!rxused)
830                         break;
831 
832                 bp->rx_tail++;
833                 count++;
834 
835                 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
836                         netdev_err(bp->dev,
837                                    "not whole frame pointed by descriptor\n");
838                         bp->stats.rx_dropped++;
839                         break;
840                 }
841                 skb = bp->rx_skbuff[entry];
842                 if (unlikely(!skb)) {
843                         netdev_err(bp->dev,
844                                    "inconsistent Rx descriptor chain\n");
845                         bp->stats.rx_dropped++;
846                         break;
847                 }
848                 /* now everything is ready for receiving packet */
849                 bp->rx_skbuff[entry] = NULL;
850                 len = ctrl & bp->rx_frm_len_mask;
851 
852                 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
853 
854                 skb_put(skb, len);
855                 dma_unmap_single(&bp->pdev->dev, addr,
856                                  bp->rx_buffer_size, DMA_FROM_DEVICE);
857 
858                 skb->protocol = eth_type_trans(skb, bp->dev);
859                 skb_checksum_none_assert(skb);
860                 if (bp->dev->features & NETIF_F_RXCSUM &&
861                     !(bp->dev->flags & IFF_PROMISC) &&
862                     GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
863                         skb->ip_summed = CHECKSUM_UNNECESSARY;
864 
865                 bp->stats.rx_packets++;
866                 bp->stats.rx_bytes += skb->len;
867 
868 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
869                 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
870                             skb->len, skb->csum);
871                 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
872                                skb_mac_header(skb), 16, true);
873                 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
874                                skb->data, 32, true);
875 #endif
876 
877                 netif_receive_skb(skb);
878         }
879 
880         gem_rx_refill(bp);
881 
882         return count;
883 }
884 
885 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
886                          unsigned int last_frag)
887 {
888         unsigned int len;
889         unsigned int frag;
890         unsigned int offset;
891         struct sk_buff *skb;
892         struct macb_dma_desc *desc;
893 
894         desc = macb_rx_desc(bp, last_frag);
895         len = desc->ctrl & bp->rx_frm_len_mask;
896 
897         netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
898                     macb_rx_ring_wrap(first_frag),
899                     macb_rx_ring_wrap(last_frag), len);
900 
901         /* The ethernet header starts NET_IP_ALIGN bytes into the
902          * first buffer. Since the header is 14 bytes, this makes the
903          * payload word-aligned.
904          *
905          * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
906          * the two padding bytes into the skb so that we avoid hitting
907          * the slowpath in memcpy(), and pull them off afterwards.
908          */
909         skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
910         if (!skb) {
911                 bp->stats.rx_dropped++;
912                 for (frag = first_frag; ; frag++) {
913                         desc = macb_rx_desc(bp, frag);
914                         desc->addr &= ~MACB_BIT(RX_USED);
915                         if (frag == last_frag)
916                                 break;
917                 }
918 
919                 /* Make descriptor updates visible to hardware */
920                 wmb();
921 
922                 return 1;
923         }
924 
925         offset = 0;
926         len += NET_IP_ALIGN;
927         skb_checksum_none_assert(skb);
928         skb_put(skb, len);
929 
930         for (frag = first_frag; ; frag++) {
931                 unsigned int frag_len = bp->rx_buffer_size;
932 
933                 if (offset + frag_len > len) {
934                         if (unlikely(frag != last_frag)) {
935                                 dev_kfree_skb_any(skb);
936                                 return -1;
937                         }
938                         frag_len = len - offset;
939                 }
940                 skb_copy_to_linear_data_offset(skb, offset,
941                                                macb_rx_buffer(bp, frag),
942                                                frag_len);
943                 offset += bp->rx_buffer_size;
944                 desc = macb_rx_desc(bp, frag);
945                 desc->addr &= ~MACB_BIT(RX_USED);
946 
947                 if (frag == last_frag)
948                         break;
949         }
950 
951         /* Make descriptor updates visible to hardware */
952         wmb();
953 
954         __skb_pull(skb, NET_IP_ALIGN);
955         skb->protocol = eth_type_trans(skb, bp->dev);
956 
957         bp->stats.rx_packets++;
958         bp->stats.rx_bytes += skb->len;
959         netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
960                     skb->len, skb->csum);
961         netif_receive_skb(skb);
962 
963         return 0;
964 }
965 
966 static inline void macb_init_rx_ring(struct macb *bp)
967 {
968         dma_addr_t addr;
969         int i;
970 
971         addr = bp->rx_buffers_dma;
972         for (i = 0; i < RX_RING_SIZE; i++) {
973                 bp->rx_ring[i].addr = addr;
974                 bp->rx_ring[i].ctrl = 0;
975                 addr += bp->rx_buffer_size;
976         }
977         bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
978         bp->rx_tail = 0;
979 }
980 
981 static int macb_rx(struct macb *bp, int budget)
982 {
983         bool reset_rx_queue = false;
984         int received = 0;
985         unsigned int tail;
986         int first_frag = -1;
987 
988         for (tail = bp->rx_tail; budget > 0; tail++) {
989                 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
990                 u32 addr, ctrl;
991 
992                 /* Make hw descriptor updates visible to CPU */
993                 rmb();
994 
995                 addr = desc->addr;
996                 ctrl = desc->ctrl;
997 
998                 if (!(addr & MACB_BIT(RX_USED)))
999                         break;
1000 
1001                 if (ctrl & MACB_BIT(RX_SOF)) {
1002                         if (first_frag != -1)
1003                                 discard_partial_frame(bp, first_frag, tail);
1004                         first_frag = tail;
1005                 }
1006 
1007                 if (ctrl & MACB_BIT(RX_EOF)) {
1008                         int dropped;
1009 
1010                         if (unlikely(first_frag == -1)) {
1011                                 reset_rx_queue = true;
1012                                 continue;
1013                         }
1014 
1015                         dropped = macb_rx_frame(bp, first_frag, tail);
1016                         first_frag = -1;
1017                         if (unlikely(dropped < 0)) {
1018                                 reset_rx_queue = true;
1019                                 continue;
1020                         }
1021                         if (!dropped) {
1022                                 received++;
1023                                 budget--;
1024                         }
1025                 }
1026         }
1027 
1028         if (unlikely(reset_rx_queue)) {
1029                 unsigned long flags;
1030                 u32 ctrl;
1031 
1032                 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1033 
1034                 spin_lock_irqsave(&bp->lock, flags);
1035 
1036                 ctrl = macb_readl(bp, NCR);
1037                 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1038 
1039                 macb_init_rx_ring(bp);
1040                 macb_writel(bp, RBQP, bp->rx_ring_dma);
1041 
1042                 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1043 
1044                 spin_unlock_irqrestore(&bp->lock, flags);
1045                 return received;
1046         }
1047 
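        /* If a frame is only partially received (SOF seen but no EOF yet),
         * keep rx_tail at its first fragment so the next poll resumes there.
         */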
1048         if (first_frag != -1)
1049                 bp->rx_tail = first_frag;
1050         else
1051                 bp->rx_tail = tail;
1052 
1053         return received;
1054 }
1055 
1056 static int macb_poll(struct napi_struct *napi, int budget)
1057 {
1058         struct macb *bp = container_of(napi, struct macb, napi);
1059         int work_done;
1060         u32 status;
1061 
1062         status = macb_readl(bp, RSR);
1063         macb_writel(bp, RSR, status);
1064 
1065         work_done = 0;
1066 
1067         netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1068                     (unsigned long)status, budget);
1069 
1070         work_done = bp->macbgem_ops.mog_rx(bp, budget);
1071         if (work_done < budget) {
1072                 napi_complete(napi);
1073 
1074                 /* Packets received while interrupts were disabled */
1075                 status = macb_readl(bp, RSR);
1076                 if (status) {
1077                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1078                                 macb_writel(bp, ISR, MACB_BIT(RCOMP));
1079                         napi_reschedule(napi);
1080                 } else {
1081                         macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1082                 }
1083         }
1084 
1085         /* TODO: Handle errors */
1086 
1087         return work_done;
1088 }
1089 
1090 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1091 {
1092         struct macb_queue *queue = dev_id;
1093         struct macb *bp = queue->bp;
1094         struct net_device *dev = bp->dev;
1095         u32 status, ctrl;
1096 
1097         status = queue_readl(queue, ISR);
1098 
1099         if (unlikely(!status))
1100                 return IRQ_NONE;
1101 
1102         spin_lock(&bp->lock);
1103 
1104         while (status) {
1105                 /* close possible race with dev_close */
1106                 if (unlikely(!netif_running(dev))) {
1107                         queue_writel(queue, IDR, -1);
1108                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1109                                 queue_writel(queue, ISR, -1);
1110                         break;
1111                 }
1112 
1113                 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1114                             (unsigned int)(queue - bp->queues),
1115                             (unsigned long)status);
1116 
1117                 if (status & MACB_RX_INT_FLAGS) {
1118                         /* There's no point taking any more interrupts
1119                          * until we have processed the buffers. The
1120                          * scheduling call may fail if the poll routine
1121                          * is already scheduled, so disable interrupts
1122                          * now.
1123                          */
1124                         queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1125                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1126                                 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1127 
1128                         if (napi_schedule_prep(&bp->napi)) {
1129                                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1130                                 __napi_schedule(&bp->napi);
1131                         }
1132                 }
1133 
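                /* On a TX error, mask TX interrupts; macb_tx_error_task()
                 * re-enables them via IER once the queue has been cleaned up.
                 */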
1134                 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1135                         queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1136                         schedule_work(&queue->tx_error_task);
1137 
1138                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1139                                 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1140 
1141                         break;
1142                 }
1143 
1144                 if (status & MACB_BIT(TCOMP))
1145                         macb_tx_interrupt(queue);
1146 
1147                 /* Link change detection isn't possible with RMII, so we'll
1148                  * add that if/when we get our hands on a full-blown MII PHY.
1149                  */
1150 
1151                 /* There is a hardware issue under heavy load where DMA can
1152                  * stop, this causes endless "used buffer descriptor read"
1153                  * interrupts but it can be cleared by re-enabling RX. See
1154                  * the at91 manual, section 41.3.1 or the Zynq manual
1155                  * section 16.7.4 for details.
1156                  */
1157                 if (status & MACB_BIT(RXUBR)) {
1158                         ctrl = macb_readl(bp, NCR);
1159                         macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1160                         wmb();
1161                         macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1162 
1163                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1164                                 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1165                 }
1166 
1167                 if (status & MACB_BIT(ISR_ROVR)) {
1168                         /* We missed at least one packet */
1169                         if (macb_is_gem(bp))
1170                                 bp->hw_stats.gem.rx_overruns++;
1171                         else
1172                                 bp->hw_stats.macb.rx_overruns++;
1173 
1174                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1175                                 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1176                 }
1177 
1178                 if (status & MACB_BIT(HRESP)) {
1179                         /* TODO: Reset the hardware, and maybe move the
1180                          * netdev_err to a lower-priority context as well
1181                          * (work queue?)
1182                          */
1183                         netdev_err(dev, "DMA bus error: HRESP not OK\n");
1184 
1185                         if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1186                                 queue_writel(queue, ISR, MACB_BIT(HRESP));
1187                 }
1188 
1189                 status = queue_readl(queue, ISR);
1190         }
1191 
1192         spin_unlock(&bp->lock);
1193 
1194         return IRQ_HANDLED;
1195 }
1196 
1197 #ifdef CONFIG_NET_POLL_CONTROLLER
1198 /* Polling receive - used by netconsole and other diagnostic tools
1199  * to allow network i/o with interrupts disabled.
1200  */
1201 static void macb_poll_controller(struct net_device *dev)
1202 {
1203         struct macb *bp = netdev_priv(dev);
1204         struct macb_queue *queue;
1205         unsigned long flags;
1206         unsigned int q;
1207 
1208         local_irq_save(flags);
1209         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1210                 macb_interrupt(dev->irq, queue);
1211         local_irq_restore(flags);
1212 }
1213 #endif
1214 
1215 static unsigned int macb_tx_map(struct macb *bp,
1216                                 struct macb_queue *queue,
1217                                 struct sk_buff *skb)
1218 {
1219         dma_addr_t mapping;
1220         unsigned int len, entry, i, tx_head = queue->tx_head;
1221         struct macb_tx_skb *tx_skb = NULL;
1222         struct macb_dma_desc *desc;
1223         unsigned int offset, size, count = 0;
1224         unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1225         unsigned int eof = 1;
1226         u32 ctrl;
1227 
1228         /* First, map non-paged data */
1229         len = skb_headlen(skb);
1230         offset = 0;
1231         while (len) {
1232                 size = min(len, bp->max_tx_length);
1233                 entry = macb_tx_ring_wrap(tx_head);
1234                 tx_skb = &queue->tx_skb[entry];
1235 
1236                 mapping = dma_map_single(&bp->pdev->dev,
1237                                          skb->data + offset,
1238                                          size, DMA_TO_DEVICE);
1239                 if (dma_mapping_error(&bp->pdev->dev, mapping))
1240                         goto dma_error;
1241 
1242                 /* Save info to properly release resources */
1243                 tx_skb->skb = NULL;
1244                 tx_skb->mapping = mapping;
1245                 tx_skb->size = size;
1246                 tx_skb->mapped_as_page = false;
1247 
1248                 len -= size;
1249                 offset += size;
1250                 count++;
1251                 tx_head++;
1252         }
1253 
1254         /* Then, map paged data from fragments */
1255         for (f = 0; f < nr_frags; f++) {
1256                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1257 
1258                 len = skb_frag_size(frag);
1259                 offset = 0;
1260                 while (len) {
1261                         size = min(len, bp->max_tx_length);
1262                         entry = macb_tx_ring_wrap(tx_head);
1263                         tx_skb = &queue->tx_skb[entry];
1264 
1265                         mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1266                                                    offset, size, DMA_TO_DEVICE);
1267                         if (dma_mapping_error(&bp->pdev->dev, mapping))
1268                                 goto dma_error;
1269 
1270                         /* Save info to properly release resources */
1271                         tx_skb->skb = NULL;
1272                         tx_skb->mapping = mapping;
1273                         tx_skb->size = size;
1274                         tx_skb->mapped_as_page = true;
1275 
1276                         len -= size;
1277                         offset += size;
1278                         count++;
1279                         tx_head++;
1280                 }
1281         }
1282 
1283         /* Should never happen */
1284         if (unlikely(!tx_skb)) {
1285                 netdev_err(bp->dev, "BUG! empty skb!\n");
1286                 return 0;
1287         }
1288 
1289         /* This is the last buffer of the frame: save socket buffer */
1290         tx_skb->skb = skb;
1291 
1292         /* Update TX ring: update buffer descriptors in reverse order
1293          * to avoid race condition
1294          */
1295 
1296         /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1297          * to set the end of TX queue
1298          */
1299         i = tx_head;
1300         entry = macb_tx_ring_wrap(i);
1301         ctrl = MACB_BIT(TX_USED);
1302         desc = &queue->tx_ring[entry];
1303         desc->ctrl = ctrl;
1304 
1305         do {
1306                 i--;
1307                 entry = macb_tx_ring_wrap(i);
1308                 tx_skb = &queue->tx_skb[entry];
1309                 desc = &queue->tx_ring[entry];
1310 
1311                 ctrl = (u32)tx_skb->size;
1312                 if (eof) {
1313                         ctrl |= MACB_BIT(TX_LAST);
1314                         eof = 0;
1315                 }
1316                 if (unlikely(entry == (TX_RING_SIZE - 1)))
1317                         ctrl |= MACB_BIT(TX_WRAP);
1318 
1319                 /* Set TX buffer descriptor */
1320                 macb_set_addr(desc, tx_skb->mapping);
1321                 /* desc->addr must be visible to hardware before clearing
1322                  * 'TX_USED' bit in desc->ctrl.
1323                  */
1324                 wmb();
1325                 desc->ctrl = ctrl;
1326         } while (i != queue->tx_head);
1327 
1328         queue->tx_head = tx_head;
1329 
1330         return count;
1331 
1332 dma_error:
1333         netdev_err(bp->dev, "TX DMA map failed\n");
1334 
1335         for (i = queue->tx_head; i != tx_head; i++) {
1336                 tx_skb = macb_tx_skb(queue, i);
1337 
1338                 macb_tx_unmap(bp, tx_skb);
1339         }
1340 
1341         return 0;
1342 }
1343 
1344 static inline int macb_clear_csum(struct sk_buff *skb)
1345 {
1346         /* no change for packets without checksum offloading */
1347         if (skb->ip_summed != CHECKSUM_PARTIAL)
1348                 return 0;
1349 
1350         /* make sure we can modify the header */
1351         if (unlikely(skb_cow_head(skb, 0)))
1352                 return -1;
1353 
1354         /* initialize checksum field
1355          * This is required - at least for Zynq, which otherwise calculates
1356  * wrong UDP header checksums for UDP packets with UDP data len <= 2
1357          */
1358         *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1359         return 0;
1360 }
1361 
1362 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1363 {
1364         u16 queue_index = skb_get_queue_mapping(skb);
1365         struct macb *bp = netdev_priv(dev);
1366         struct macb_queue *queue = &bp->queues[queue_index];
1367         unsigned long flags;
1368         unsigned int count, nr_frags, frag_size, f;
1369 
1370 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1371         netdev_vdbg(bp->dev,
1372                     "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1373                     queue_index, skb->len, skb->head, skb->data,
1374                     skb_tail_pointer(skb), skb_end_pointer(skb));
1375         print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1376                        skb->data, 16, true);
1377 #endif
1378 
1379         /* Count how many TX buffer descriptors are needed to send this
1380          * socket buffer: skb fragments of jumbo frames may need to be
1381          * split into many buffer descriptors.
1382          */
1383         count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1384         nr_frags = skb_shinfo(skb)->nr_frags;
1385         for (f = 0; f < nr_frags; f++) {
1386                 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1387                 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1388         }
1389 
1390         spin_lock_irqsave(&bp->lock, flags);
1391 
1392         /* This is a hard error, log it. */
1393         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1394                 netif_stop_subqueue(dev, queue_index);
1395                 spin_unlock_irqrestore(&bp->lock, flags);
1396                 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1397                            queue->tx_head, queue->tx_tail);
1398                 return NETDEV_TX_BUSY;
1399         }
1400 
1401         if (macb_clear_csum(skb)) {
1402                 dev_kfree_skb_any(skb);
1403                 goto unlock;
1404         }
1405 
1406         /* Map socket buffer for DMA transfer */
1407         if (!macb_tx_map(bp, queue, skb)) {
1408                 dev_kfree_skb_any(skb);
1409                 goto unlock;
1410         }
1411 
1412         /* Make newly initialized descriptor visible to hardware */
1413         wmb();
1414 
1415         skb_tx_timestamp(skb);
1416 
1417         macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1418 
1419         if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1420                 netif_stop_subqueue(dev, queue_index);
1421 
1422 unlock:
1423         spin_unlock_irqrestore(&bp->lock, flags);
1424 
1425         return NETDEV_TX_OK;
1426 }
1427 
1428 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1429 {
1430         if (!macb_is_gem(bp)) {
1431                 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1432         } else {
1433                 bp->rx_buffer_size = size;
1434 
1435                 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1436                         netdev_dbg(bp->dev,
1437                                    "RX buffer must be multiple of %d bytes, expanding\n",
1438                                    RX_BUFFER_MULTIPLE);
1439                         bp->rx_buffer_size =
1440                                 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1441                 }
1442         }
1443 
1444         netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1445                    bp->dev->mtu, bp->rx_buffer_size);
1446 }
1447 
1448 static void gem_free_rx_buffers(struct macb *bp)
1449 {
1450         struct sk_buff          *skb;
1451         struct macb_dma_desc    *desc;
1452         dma_addr_t              addr;
1453         int i;
1454 
1455         if (!bp->rx_skbuff)
1456                 return;
1457 
1458         for (i = 0; i < RX_RING_SIZE; i++) {
1459                 skb = bp->rx_skbuff[i];
1460 
1461                 if (!skb)
1462                         continue;
1463 
1464                 desc = &bp->rx_ring[i];
1465                 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1466 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1467                 addr |= ((u64)(desc->addrh) << 32);
1468 #endif
1469                 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1470                                  DMA_FROM_DEVICE);
1471                 dev_kfree_skb_any(skb);
1472                 skb = NULL;
1473         }
1474 
1475         kfree(bp->rx_skbuff);
1476         bp->rx_skbuff = NULL;
1477 }
1478 
1479 static void macb_free_rx_buffers(struct macb *bp)
1480 {
1481         if (bp->rx_buffers) {
1482                 dma_free_coherent(&bp->pdev->dev,
1483                                   RX_RING_SIZE * bp->rx_buffer_size,
1484                                   bp->rx_buffers, bp->rx_buffers_dma);
1485                 bp->rx_buffers = NULL;
1486         }
1487 }
1488 
1489 static void macb_free_consistent(struct macb *bp)
1490 {
1491         struct macb_queue *queue;
1492         unsigned int q;
1493 
1494         bp->macbgem_ops.mog_free_rx_buffers(bp);
1495         if (bp->rx_ring) {
1496                 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1497                                   bp->rx_ring, bp->rx_ring_dma);
1498                 bp->rx_ring = NULL;
1499         }
1500 
1501         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1502                 kfree(queue->tx_skb);
1503                 queue->tx_skb = NULL;
1504                 if (queue->tx_ring) {
1505                         dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1506                                           queue->tx_ring, queue->tx_ring_dma);
1507                         queue->tx_ring = NULL;
1508                 }
1509         }
1510 }
1511 
1512 static int gem_alloc_rx_buffers(struct macb *bp)
1513 {
1514         int size;
1515 
1516         size = RX_RING_SIZE * sizeof(struct sk_buff *);
1517         bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1518         if (!bp->rx_skbuff)
1519                 return -ENOMEM;
1520 
1521         netdev_dbg(bp->dev,
1522                    "Allocated %d RX struct sk_buff entries at %p\n",
1523                    RX_RING_SIZE, bp->rx_skbuff);
1524         return 0;
1525 }
1526 
1527 static int macb_alloc_rx_buffers(struct macb *bp)
1528 {
1529         int size;
1530 
1531         size = RX_RING_SIZE * bp->rx_buffer_size;
1532         bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1533                                             &bp->rx_buffers_dma, GFP_KERNEL);
1534         if (!bp->rx_buffers)
1535                 return -ENOMEM;
1536 
1537         netdev_dbg(bp->dev,
1538                    "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1539                    size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1540         return 0;
1541 }
1542 
1543 static int macb_alloc_consistent(struct macb *bp)
1544 {
1545         struct macb_queue *queue;
1546         unsigned int q;
1547         int size;
1548 
1549         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1550                 size = TX_RING_BYTES;
1551                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1552                                                     &queue->tx_ring_dma,
1553                                                     GFP_KERNEL);
1554                 if (!queue->tx_ring)
1555                         goto out_err;
1556                 netdev_dbg(bp->dev,
1557                            "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1558                            q, size, (unsigned long)queue->tx_ring_dma,
1559                            queue->tx_ring);
1560 
1561                 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1562                 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1563                 if (!queue->tx_skb)
1564                         goto out_err;
1565         }
1566 
1567         size = RX_RING_BYTES;
1568         bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1569                                          &bp->rx_ring_dma, GFP_KERNEL);
1570         if (!bp->rx_ring)
1571                 goto out_err;
1572         netdev_dbg(bp->dev,
1573                    "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1574                    size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1575 
1576         if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1577                 goto out_err;
1578 
1579         return 0;
1580 
1581 out_err:
1582         macb_free_consistent(bp);
1583         return -ENOMEM;
1584 }
1585 
1586 static void gem_init_rings(struct macb *bp)
1587 {
1588         struct macb_queue *queue;
1589         unsigned int q;
1590         int i;
1591 
1592         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1593                 for (i = 0; i < TX_RING_SIZE; i++) {
1594                         macb_set_addr(&(queue->tx_ring[i]), 0);
1595                         queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1596                 }
1597                 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1598                 queue->tx_head = 0;
1599                 queue->tx_tail = 0;
1600         }
1601 
1602         bp->rx_tail = 0;
1603         bp->rx_prepared_head = 0;
1604 
1605         gem_rx_refill(bp);
1606 }
1607 
1608 static void macb_init_rings(struct macb *bp)
1609 {
1610         int i;
1611 
1612         macb_init_rx_ring(bp);
1613 
1614         for (i = 0; i < TX_RING_SIZE; i++) {
1615                 bp->queues[0].tx_ring[i].addr = 0;
1616                 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1617         }
1618         bp->queues[0].tx_head = 0;
1619         bp->queues[0].tx_tail = 0;
1620         bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1621 }
1622 
1623 static void macb_reset_hw(struct macb *bp)
1624 {
1625         struct macb_queue *queue;
1626         unsigned int q;
1627 
1628         /* Disable RX and TX (XXX: Should we halt the transmission
1629          * more gracefully?)
1630          */
1631         macb_writel(bp, NCR, 0);
1632 
1633         /* Clear the stats registers (XXX: Update stats first?) */
1634         macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1635 
1636         /* Clear all status flags */
1637         macb_writel(bp, TSR, -1);
1638         macb_writel(bp, RSR, -1);
1639 
1640         /* Disable all interrupts */
1641         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1642                 queue_writel(queue, IDR, -1);
1643                 queue_readl(queue, ISR);
1644                 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1645                         queue_writel(queue, ISR, -1);
1646         }
1647 }
1648 
1649 static u32 gem_mdc_clk_div(struct macb *bp)
1650 {
1651         u32 config;
1652         unsigned long pclk_hz = clk_get_rate(bp->pclk);
1653 
1654         if (pclk_hz <= 20000000)
1655                 config = GEM_BF(CLK, GEM_CLK_DIV8);
1656         else if (pclk_hz <= 40000000)
1657                 config = GEM_BF(CLK, GEM_CLK_DIV16);
1658         else if (pclk_hz <= 80000000)
1659                 config = GEM_BF(CLK, GEM_CLK_DIV32);
1660         else if (pclk_hz <= 120000000)
1661                 config = GEM_BF(CLK, GEM_CLK_DIV48);
1662         else if (pclk_hz <= 160000000)
1663                 config = GEM_BF(CLK, GEM_CLK_DIV64);
1664         else
1665                 config = GEM_BF(CLK, GEM_CLK_DIV96);
1666 
1667         return config;
1668 }
1669 
1670 static u32 macb_mdc_clk_div(struct macb *bp)
1671 {
1672         u32 config;
1673         unsigned long pclk_hz;
1674 
1675         if (macb_is_gem(bp))
1676                 return gem_mdc_clk_div(bp);
1677 
1678         pclk_hz = clk_get_rate(bp->pclk);
1679         if (pclk_hz <= 20000000)
1680                 config = MACB_BF(CLK, MACB_CLK_DIV8);
1681         else if (pclk_hz <= 40000000)
1682                 config = MACB_BF(CLK, MACB_CLK_DIV16);
1683         else if (pclk_hz <= 80000000)
1684                 config = MACB_BF(CLK, MACB_CLK_DIV32);
1685         else
1686                 config = MACB_BF(CLK, MACB_CLK_DIV64);
1687 
1688         return config;
1689 }
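
/* Worked example (illustrative): IEEE 802.3 limits MDC to 2.5 MHz, which is
 * what these divider tables preserve.  With pclk = 133 MHz on a MACB, none
 * of the <= 80 MHz cases match, so MACB_CLK_DIV64 is selected and
 * MDC = 133 MHz / 64 ~= 2.08 MHz, just under the limit.
 */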
1690 
1691 /* Get the DMA bus width field of the network configuration register that we
1692  * should program.  We find the width from decoding the design configuration
1693  * register to find the maximum supported data bus width.
1694  */
1695 static u32 macb_dbw(struct macb *bp)
1696 {
1697         if (!macb_is_gem(bp))
1698                 return 0;
1699 
1700         switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1701         case 4:
1702                 return GEM_BF(DBW, GEM_DBW128);
1703         case 2:
1704                 return GEM_BF(DBW, GEM_DBW64);
1705         case 1:
1706         default:
1707                 return GEM_BF(DBW, GEM_DBW32);
1708         }
1709 }
1710 
1711 /* Configure the receive DMA engine
1712  * - use the correct receive buffer size
1713  * - set best burst length for DMA operations
1714  *   (if not supported by FIFO, it will fallback to default)
1715  * - set both rx/tx packet buffers to full memory size
1716  * These are configurable parameters for GEM.
1717  */
1718 static void macb_configure_dma(struct macb *bp)
1719 {
1720         u32 dmacfg;
1721 
1722         if (macb_is_gem(bp)) {
1723                 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1724                 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1725                 if (bp->dma_burst_length)
1726                         dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1727                 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1728                 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1729 
1730                 if (bp->native_io)
1731                         dmacfg &= ~GEM_BIT(ENDIA_DESC);
1732                 else
1733                         dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1734 
1735                 if (bp->dev->features & NETIF_F_HW_CSUM)
1736                         dmacfg |= GEM_BIT(TXCOEN);
1737                 else
1738                         dmacfg &= ~GEM_BIT(TXCOEN);
1739 
1740 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1741                 dmacfg |= GEM_BIT(ADDR64);
1742 #endif
1743                 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1744                            dmacfg);
1745                 gem_writel(bp, DMACFG, dmacfg);
1746         }
1747 }
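
/* Worked example (illustrative): with rx_buffer_size = 1536, the RXBS field
 * is programmed with 1536 / RX_BUFFER_MULTIPLE = 24, i.e. the GEM DMA treats
 * each receive buffer as 24 units of 64 bytes.
 */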
1748 
1749 static void macb_init_hw(struct macb *bp)
1750 {
1751         struct macb_queue *queue;
1752         unsigned int q;
1753         u32 config;
1754 
1756         macb_reset_hw(bp);
1757         macb_set_hwaddr(bp);
1758 
1759         config = macb_mdc_clk_div(bp);
1760         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1761                 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1762         config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
1763         config |= MACB_BIT(PAE);                /* PAuse Enable */
1764         config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
1765         if (bp->caps & MACB_CAPS_JUMBO)
1766                 config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
1767         else
1768                 config |= MACB_BIT(BIG);        /* Receive oversized frames */
1769         if (bp->dev->flags & IFF_PROMISC)
1770                 config |= MACB_BIT(CAF);        /* Copy All Frames */
1771         else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1772                 config |= GEM_BIT(RXCOEN);
1773         if (!(bp->dev->flags & IFF_BROADCAST))
1774                 config |= MACB_BIT(NBC);        /* No BroadCast */
1775         config |= macb_dbw(bp);
1776         macb_writel(bp, NCFGR, config);
1777         if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1778                 gem_writel(bp, JML, bp->jumbo_max_len);
1779         bp->speed = SPEED_10;
1780         bp->duplex = DUPLEX_HALF;
1781         bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1782         if (bp->caps & MACB_CAPS_JUMBO)
1783                 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1784 
1785         macb_configure_dma(bp);
1786 
1787         /* Initialize TX and RX buffers */
1788         macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
1789 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1790         macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
1791 #endif
1792         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1793                 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
1794 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1795                 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
1796 #endif
1797 
1798                 /* Enable interrupts */
1799                 queue_writel(queue, IER,
1800                              MACB_RX_INT_FLAGS |
1801                              MACB_TX_INT_FLAGS |
1802                              MACB_BIT(HRESP));
1803         }
1804 
1805         /* Enable TX and RX */
1806         macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1807 }
1808 
1809 /* The hash address register is 64 bits long and takes up two
1810  * locations in the memory map.  The least significant bits are stored
1811  * in EMAC_HSL and the most significant bits in EMAC_HSH.
1812  *
1813  * The unicast hash enable and the multicast hash enable bits in the
1814  * network configuration register enable the reception of hash matched
1815  * frames. The destination address is reduced to a 6 bit index into
1816  * the 64 bit hash register using the following hash function.  The
1817  * hash function is an exclusive or of every sixth bit of the
1818  * destination address.
1819  *
1820  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1821  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1822  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1823  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1824  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1825  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1826  *
1827  * da[0] represents the least significant bit of the first byte
1828  * received, that is, the multicast/unicast indicator, and da[47]
1829  * represents the most significant bit of the last byte received.  If
1830  * the hash index, hi[n], points to a bit that is set in the hash
1831  * register then the frame will be matched according to whether the
1832  * frame is multicast or unicast.  A multicast match will be signalled
1833  * if the multicast hash enable bit is set, da[0] is 1 and the hash
1834  * index points to a bit set in the hash register.  A unicast match
1835  * will be signalled if the unicast hash enable bit is set, da[0] is 0
1836  * and the hash index points to a bit set in the hash register.  To
1837  * receive all multicast frames, the hash register should be set with
1838  * all ones and the multicast hash enable bit should be set in the
1839  * network configuration register.
1840  */
1841 
1842 static inline int hash_bit_value(int bitnr, __u8 *addr)
1843 {
1844         if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1845                 return 1;
1846         return 0;
1847 }
1848 
1849 /* Return the hash index value for the specified address. */
1850 static int hash_get_index(__u8 *addr)
1851 {
1852         int i, j, bitval;
1853         int hash_index = 0;
1854 
1855         for (j = 0; j < 6; j++) {
1856                 for (i = 0, bitval = 0; i < 8; i++)
1857                         bitval ^= hash_bit_value(i * 6 + j, addr);
1858 
1859                 hash_index |= (bitval << j);
1860         }
1861 
1862         return hash_index;
1863 }
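
/* Worked example (illustrative): for the IPv4 all-hosts multicast address
 * 01:00:5e:00:00:01, the set bits of da[] are {0, 17, 18, 19, 20, 22, 40}.
 * XOR-ing every sixth bit as described above gives hi[5..0] = 100110b:
 *
 *     __u8 mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *     hash_get_index(mac);    => returns 38
 *
 * so macb_sethashtable() sets bit 6 (38 & 31) of mc_filter[38 >> 5], which
 * lands in the HRT register.
 */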
1864 
1865 /* Add multicast addresses to the internal multicast-hash table. */
1866 static void macb_sethashtable(struct net_device *dev)
1867 {
1868         struct netdev_hw_addr *ha;
1869         unsigned long mc_filter[2];
1870         unsigned int bitnr;
1871         struct macb *bp = netdev_priv(dev);
1872 
1873         mc_filter[0] = 0;
1874         mc_filter[1] = 0;
1875 
1876         netdev_for_each_mc_addr(ha, dev) {
1877                 bitnr = hash_get_index(ha->addr);
1878                 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1879         }
1880 
1881         macb_or_gem_writel(bp, HRB, mc_filter[0]);
1882         macb_or_gem_writel(bp, HRT, mc_filter[1]);
1883 }
1884 
1885 /* Enable/Disable promiscuous and multicast modes. */
1886 static void macb_set_rx_mode(struct net_device *dev)
1887 {
1888         unsigned long cfg;
1889         struct macb *bp = netdev_priv(dev);
1890 
1891         cfg = macb_readl(bp, NCFGR);
1892 
1893         if (dev->flags & IFF_PROMISC) {
1894                 /* Enable promiscuous mode */
1895                 cfg |= MACB_BIT(CAF);
1896 
1897                 /* Disable RX checksum offload */
1898                 if (macb_is_gem(bp))
1899                         cfg &= ~GEM_BIT(RXCOEN);
1900         } else {
1901                 /* Disable promiscuous mode */
1902                 cfg &= ~MACB_BIT(CAF);
1903 
1904                 /* Enable RX checksum offload only if requested */
1905                 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1906                         cfg |= GEM_BIT(RXCOEN);
1907         }
1908 
1909         if (dev->flags & IFF_ALLMULTI) {
1910                 /* Enable all multicast mode */
1911                 macb_or_gem_writel(bp, HRB, -1);
1912                 macb_or_gem_writel(bp, HRT, -1);
1913                 cfg |= MACB_BIT(NCFGR_MTI);
1914         } else if (!netdev_mc_empty(dev)) {
1915                 /* Enable specific multicasts */
1916                 macb_sethashtable(dev);
1917                 cfg |= MACB_BIT(NCFGR_MTI);
1918         } else if (dev->flags & (~IFF_ALLMULTI)) {
1919                 /* Disable all multicast mode */
1920                 macb_or_gem_writel(bp, HRB, 0);
1921                 macb_or_gem_writel(bp, HRT, 0);
1922                 cfg &= ~MACB_BIT(NCFGR_MTI);
1923         }
1924 
1925         macb_writel(bp, NCFGR, cfg);
1926 }
1927 
1928 static int macb_open(struct net_device *dev)
1929 {
1930         struct macb *bp = netdev_priv(dev);
1931         size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1932         int err;
1933 
1934         netdev_dbg(bp->dev, "open\n");
1935 
1936         /* carrier starts down */
1937         netif_carrier_off(dev);
1938 
1939         /* if the PHY is not yet registered, retry later */
1940         if (!dev->phydev)
1941                 return -EAGAIN;
1942 
1943         /* RX buffers initialization */
1944         macb_init_rx_buffer_size(bp, bufsz);
1945 
1946         err = macb_alloc_consistent(bp);
1947         if (err) {
1948                 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1949                            err);
1950                 return err;
1951         }
1952 
1953         napi_enable(&bp->napi);
1954 
1955         bp->macbgem_ops.mog_init_rings(bp);
1956         macb_init_hw(bp);
1957 
1958         /* schedule a link state check */
1959         phy_start(dev->phydev);
1960 
1961         netif_tx_start_all_queues(dev);
1962 
1963         return 0;
1964 }
1965 
1966 static int macb_close(struct net_device *dev)
1967 {
1968         struct macb *bp = netdev_priv(dev);
1969         unsigned long flags;
1970 
1971         netif_tx_stop_all_queues(dev);
1972         napi_disable(&bp->napi);
1973 
1974         if (dev->phydev)
1975                 phy_stop(dev->phydev);
1976 
1977         spin_lock_irqsave(&bp->lock, flags);
1978         macb_reset_hw(bp);
1979         netif_carrier_off(dev);
1980         spin_unlock_irqrestore(&bp->lock, flags);
1981 
1982         macb_free_consistent(bp);
1983 
1984         return 0;
1985 }
1986 
1987 static int macb_change_mtu(struct net_device *dev, int new_mtu)
1988 {
1989         struct macb *bp = netdev_priv(dev);
1990         u32 max_mtu;
1991 
1992         if (netif_running(dev))
1993                 return -EBUSY;
1994 
1995         max_mtu = ETH_DATA_LEN;
1996         if (bp->caps & MACB_CAPS_JUMBO)
1997                 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
1998 
1999         if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
2000                 return -EINVAL;
2001 
2002         dev->mtu = new_mtu;
2003 
2004         return 0;
2005 }
2006 
2007 static void gem_update_stats(struct macb *bp)
2008 {
2009         unsigned int i;
2010         u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2011 
2012         for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2013                 u32 offset = gem_statistics[i].offset;
2014                 u64 val = bp->macb_reg_readl(bp, offset);
2015 
2016                 bp->ethtool_stats[i] += val;
2017                 *p += val;
2018 
2019                 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2020                         /* Add GEM_OCTTXH, GEM_OCTRXH */
2021                         val = bp->macb_reg_readl(bp, offset + 4);
2022                         bp->ethtool_stats[i] += ((u64)val) << 32;
2023                         *(++p) += val;
2024                 }
2025         }
2026 }
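
/* Worked example (illustrative): the octet counters are 64 bits wide in
 * hardware.  If GEM_OCTTXL reads 0x00000010 and GEM_OCTTXH (at offset + 4)
 * reads 0x00000002, the accumulated ethtool statistic grows by
 * 0x0000000200000010, while the 32-bit hw_stats mirror keeps the low and
 * high halves in adjacent fields.
 */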
2027 
2028 static struct net_device_stats *gem_get_stats(struct macb *bp)
2029 {
2030         struct gem_stats *hwstat = &bp->hw_stats.gem;
2031         struct net_device_stats *nstat = &bp->stats;
2032 
2033         gem_update_stats(bp);
2034 
2035         nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2036                             hwstat->rx_alignment_errors +
2037                             hwstat->rx_resource_errors +
2038                             hwstat->rx_overruns +
2039                             hwstat->rx_oversize_frames +
2040                             hwstat->rx_jabbers +
2041                             hwstat->rx_undersized_frames +
2042                             hwstat->rx_length_field_frame_errors);
2043         nstat->tx_errors = (hwstat->tx_late_collisions +
2044                             hwstat->tx_excessive_collisions +
2045                             hwstat->tx_underrun +
2046                             hwstat->tx_carrier_sense_errors);
2047         nstat->multicast = hwstat->rx_multicast_frames;
2048         nstat->collisions = (hwstat->tx_single_collision_frames +
2049                              hwstat->tx_multiple_collision_frames +
2050                              hwstat->tx_excessive_collisions);
2051         nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2052                                    hwstat->rx_jabbers +
2053                                    hwstat->rx_undersized_frames +
2054                                    hwstat->rx_length_field_frame_errors);
2055         nstat->rx_over_errors = hwstat->rx_resource_errors;
2056         nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2057         nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2058         nstat->rx_fifo_errors = hwstat->rx_overruns;
2059         nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2060         nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2061         nstat->tx_fifo_errors = hwstat->tx_underrun;
2062 
2063         return nstat;
2064 }
2065 
2066 static void gem_get_ethtool_stats(struct net_device *dev,
2067                                   struct ethtool_stats *stats, u64 *data)
2068 {
2069         struct macb *bp;
2070 
2071         bp = netdev_priv(dev);
2072         gem_update_stats(bp);
2073         memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2074 }
2075 
2076 static int gem_get_sset_count(struct net_device *dev, int sset)
2077 {
2078         switch (sset) {
2079         case ETH_SS_STATS:
2080                 return GEM_STATS_LEN;
2081         default:
2082                 return -EOPNOTSUPP;
2083         }
2084 }
2085 
2086 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2087 {
2088         unsigned int i;
2089 
2090         switch (sset) {
2091         case ETH_SS_STATS:
2092                 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2093                         memcpy(p, gem_statistics[i].stat_string,
2094                                ETH_GSTRING_LEN);
2095                 break;
2096         }
2097 }
2098 
2099 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2100 {
2101         struct macb *bp = netdev_priv(dev);
2102         struct net_device_stats *nstat = &bp->stats;
2103         struct macb_stats *hwstat = &bp->hw_stats.macb;
2104 
2105         if (macb_is_gem(bp))
2106                 return gem_get_stats(bp);
2107 
2108         /* read stats from hardware */
2109         macb_update_stats(bp);
2110 
2111         /* Convert HW stats into netdevice stats */
2112         nstat->rx_errors = (hwstat->rx_fcs_errors +
2113                             hwstat->rx_align_errors +
2114                             hwstat->rx_resource_errors +
2115                             hwstat->rx_overruns +
2116                             hwstat->rx_oversize_pkts +
2117                             hwstat->rx_jabbers +
2118                             hwstat->rx_undersize_pkts +
2119                             hwstat->rx_length_mismatch);
2120         nstat->tx_errors = (hwstat->tx_late_cols +
2121                             hwstat->tx_excessive_cols +
2122                             hwstat->tx_underruns +
2123                             hwstat->tx_carrier_errors +
2124                             hwstat->sqe_test_errors);
2125         nstat->collisions = (hwstat->tx_single_cols +
2126                              hwstat->tx_multiple_cols +
2127                              hwstat->tx_excessive_cols);
2128         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2129                                    hwstat->rx_jabbers +
2130                                    hwstat->rx_undersize_pkts +
2131                                    hwstat->rx_length_mismatch);
2132         nstat->rx_over_errors = hwstat->rx_resource_errors +
2133                                    hwstat->rx_overruns;
2134         nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2135         nstat->rx_frame_errors = hwstat->rx_align_errors;
2136         nstat->rx_fifo_errors = hwstat->rx_overruns;
2137         /* XXX: What does "missed" mean? */
2138         nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2139         nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2140         nstat->tx_fifo_errors = hwstat->tx_underruns;
2141         /* Don't know about heartbeat or window errors... */
2142 
2143         return nstat;
2144 }
2145 
2146 static int macb_get_regs_len(struct net_device *netdev)
2147 {
2148         return MACB_GREGS_NBR * sizeof(u32);
2149 }
2150 
2151 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2152                           void *p)
2153 {
2154         struct macb *bp = netdev_priv(dev);
2155         unsigned int tail, head;
2156         u32 *regs_buff = p;
2157 
2158         regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2159                         | MACB_GREGS_VERSION;
2160 
2161         tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2162         head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2163 
2164         regs_buff[0]  = macb_readl(bp, NCR);
2165         regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2166         regs_buff[2]  = macb_readl(bp, NSR);
2167         regs_buff[3]  = macb_readl(bp, TSR);
2168         regs_buff[4]  = macb_readl(bp, RBQP);
2169         regs_buff[5]  = macb_readl(bp, TBQP);
2170         regs_buff[6]  = macb_readl(bp, RSR);
2171         regs_buff[7]  = macb_readl(bp, IMR);
2172 
2173         regs_buff[8]  = tail;
2174         regs_buff[9]  = head;
2175         regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2176         regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2177 
2178         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2179                 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2180         if (macb_is_gem(bp))
2181                 regs_buff[13] = gem_readl(bp, DMACFG);
2182 }
2183 
2184 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2185 {
2186         struct macb *bp = netdev_priv(netdev);
2187 
2188         wol->supported = 0;
2189         wol->wolopts = 0;
2190 
2191         if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2192                 wol->supported = WAKE_MAGIC;
2193 
2194                 if (bp->wol & MACB_WOL_ENABLED)
2195                         wol->wolopts |= WAKE_MAGIC;
2196         }
2197 }
2198 
2199 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2200 {
2201         struct macb *bp = netdev_priv(netdev);
2202 
2203         if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2204             (wol->wolopts & ~WAKE_MAGIC))
2205                 return -EOPNOTSUPP;
2206 
2207         if (wol->wolopts & WAKE_MAGIC)
2208                 bp->wol |= MACB_WOL_ENABLED;
2209         else
2210                 bp->wol &= ~MACB_WOL_ENABLED;
2211 
2212         device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2213 
2214         return 0;
2215 }
2216 
2217 static const struct ethtool_ops macb_ethtool_ops = {
2218         .get_regs_len           = macb_get_regs_len,
2219         .get_regs               = macb_get_regs,
2220         .get_link               = ethtool_op_get_link,
2221         .get_ts_info            = ethtool_op_get_ts_info,
2222         .get_wol                = macb_get_wol,
2223         .set_wol                = macb_set_wol,
2224         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2225         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2226 };
2227 
2228 static const struct ethtool_ops gem_ethtool_ops = {
2229         .get_regs_len           = macb_get_regs_len,
2230         .get_regs               = macb_get_regs,
2231         .get_link               = ethtool_op_get_link,
2232         .get_ts_info            = ethtool_op_get_ts_info,
2233         .get_ethtool_stats      = gem_get_ethtool_stats,
2234         .get_strings            = gem_get_ethtool_strings,
2235         .get_sset_count         = gem_get_sset_count,
2236         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2237         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2238 };
2239 
2240 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2241 {
2242         struct phy_device *phydev = dev->phydev;
2243 
2244         if (!netif_running(dev))
2245                 return -EINVAL;
2246 
2247         if (!phydev)
2248                 return -ENODEV;
2249 
2250         return phy_mii_ioctl(phydev, rq, cmd);
2251 }
2252 
2253 static int macb_set_features(struct net_device *netdev,
2254                              netdev_features_t features)
2255 {
2256         struct macb *bp = netdev_priv(netdev);
2257         netdev_features_t changed = features ^ netdev->features;
2258 
2259         /* TX checksum offload */
2260         if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2261                 u32 dmacfg;
2262 
2263                 dmacfg = gem_readl(bp, DMACFG);
2264                 if (features & NETIF_F_HW_CSUM)
2265                         dmacfg |= GEM_BIT(TXCOEN);
2266                 else
2267                         dmacfg &= ~GEM_BIT(TXCOEN);
2268                 gem_writel(bp, DMACFG, dmacfg);
2269         }
2270 
2271         /* RX checksum offload */
2272         if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2273                 u32 netcfg;
2274 
2275                 netcfg = gem_readl(bp, NCFGR);
2276                 if (features & NETIF_F_RXCSUM &&
2277                     !(netdev->flags & IFF_PROMISC))
2278                         netcfg |= GEM_BIT(RXCOEN);
2279                 else
2280                         netcfg &= ~GEM_BIT(RXCOEN);
2281                 gem_writel(bp, NCFGR, netcfg);
2282         }
2283 
2284         return 0;
2285 }
2286 
2287 static const struct net_device_ops macb_netdev_ops = {
2288         .ndo_open               = macb_open,
2289         .ndo_stop               = macb_close,
2290         .ndo_start_xmit         = macb_start_xmit,
2291         .ndo_set_rx_mode        = macb_set_rx_mode,
2292         .ndo_get_stats          = macb_get_stats,
2293         .ndo_do_ioctl           = macb_ioctl,
2294         .ndo_validate_addr      = eth_validate_addr,
2295         .ndo_change_mtu         = macb_change_mtu,
2296         .ndo_set_mac_address    = eth_mac_addr,
2297 #ifdef CONFIG_NET_POLL_CONTROLLER
2298         .ndo_poll_controller    = macb_poll_controller,
2299 #endif
2300         .ndo_set_features       = macb_set_features,
2301 };
2302 
2303 /* Configure peripheral capabilities according to device tree
2304  * and integration options used
2305  */
2306 static void macb_configure_caps(struct macb *bp,
2307                                 const struct macb_config *dt_conf)
2308 {
2309         u32 dcfg;
2310 
2311         if (dt_conf)
2312                 bp->caps = dt_conf->caps;
2313 
2314         if (hw_is_gem(bp->regs, bp->native_io)) {
2315                 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2316 
2317                 dcfg = gem_readl(bp, DCFG1);
2318                 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2319                         bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2320                 dcfg = gem_readl(bp, DCFG2);
2321                 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2322                         bp->caps |= MACB_CAPS_FIFO_MODE;
2323         }
2324 
2325         dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2326 }
2327 
2328 static void macb_probe_queues(void __iomem *mem,
2329                               bool native_io,
2330                               unsigned int *queue_mask,
2331                               unsigned int *num_queues)
2332 {
2333         unsigned int hw_q;
2334 
2335         *queue_mask = 0x1;
2336         *num_queues = 1;
2337 
2338         /* Is it MACB or GEM?
2339          *
2340          * We need to read directly from the hardware here because
2341          * we are early in the probe process and don't have the
2342          * MACB_CAPS_MACB_IS_GEM flag set yet.
2343          */
2344         if (!hw_is_gem(mem, native_io))
2345                 return;
2346 
2347         /* bit 0 is never set but queue 0 always exists */
2348         *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2349 
2350         *queue_mask |= 0x1;
2351 
2352         for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2353                 if (*queue_mask & (1 << hw_q))
2354                         (*num_queues)++;
2355 }
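
/* Worked example (illustrative): if the low byte of GEM_DCFG6 reads 0x06
 * (hardware queues 1 and 2 implemented), OR-ing in the always-present
 * queue 0 yields queue_mask = 0x07 and num_queues = 3.
 */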
2356 
2357 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2358                          struct clk **hclk, struct clk **tx_clk,
2359                          struct clk **rx_clk)
2360 {
2361         int err;
2362 
2363         *pclk = devm_clk_get(&pdev->dev, "pclk");
2364         if (IS_ERR(*pclk)) {
2365                 err = PTR_ERR(*pclk);
2366                 dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
2367                 return err;
2368         }
2369 
2370         *hclk = devm_clk_get(&pdev->dev, "hclk");
2371         if (IS_ERR(*hclk)) {
2372                 err = PTR_ERR(*hclk);
2373                 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2374                 return err;
2375         }
2376 
2377         *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2378         if (IS_ERR(*tx_clk))
2379                 *tx_clk = NULL;
2380 
2381         *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
2382         if (IS_ERR(*rx_clk))
2383                 *rx_clk = NULL;
2384 
2385         err = clk_prepare_enable(*pclk);
2386         if (err) {
2387                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2388                 return err;
2389         }
2390 
2391         err = clk_prepare_enable(*hclk);
2392         if (err) {
2393                 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2394                 goto err_disable_pclk;
2395         }
2396 
2397         err = clk_prepare_enable(*tx_clk);
2398         if (err) {
2399                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2400                 goto err_disable_hclk;
2401         }
2402 
2403         err = clk_prepare_enable(*rx_clk);
2404         if (err) {
2405                 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2406                 goto err_disable_txclk;
2407         }
2408 
2409         return 0;
2410 
2411 err_disable_txclk:
2412         clk_disable_unprepare(*tx_clk);
2413 
2414 err_disable_hclk:
2415         clk_disable_unprepare(*hclk);
2416 
2417 err_disable_pclk:
2418         clk_disable_unprepare(*pclk);
2419 
2420         return err;
2421 }
2422 
2423 static int macb_init(struct platform_device *pdev)
2424 {
2425         struct net_device *dev = platform_get_drvdata(pdev);
2426         unsigned int hw_q, q;
2427         struct macb *bp = netdev_priv(dev);
2428         struct macb_queue *queue;
2429         int err;
2430         u32 val;
2431 
2432         /* Set the queue register mapping once and for all: queue0 has a
2433          * special register mapping, but we don't want to test the queue index
2434          * and then compute the corresponding register offset at run time.
2435          */
2436         for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2437                 if (!(bp->queue_mask & (1 << hw_q)))
2438                         continue;
2439 
2440                 queue = &bp->queues[q];
2441                 queue->bp = bp;
2442                 if (hw_q) {
2443                         queue->ISR  = GEM_ISR(hw_q - 1);
2444                         queue->IER  = GEM_IER(hw_q - 1);
2445                         queue->IDR  = GEM_IDR(hw_q - 1);
2446                         queue->IMR  = GEM_IMR(hw_q - 1);
2447                         queue->TBQP = GEM_TBQP(hw_q - 1);
2448 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2449                         queue->TBQPH = GEM_TBQPH(hw_q - 1);
2450 #endif
2451                 } else {
2452                         /* queue0 uses legacy registers */
2453                         queue->ISR  = MACB_ISR;
2454                         queue->IER  = MACB_IER;
2455                         queue->IDR  = MACB_IDR;
2456                         queue->IMR  = MACB_IMR;
2457                         queue->TBQP = MACB_TBQP;
2458 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2459                         queue->TBQPH = MACB_TBQPH;
2460 #endif
2461                 }
2462 
2463                 /* Get the IRQ: here we use the Linux queue index, not the
2464                  * hardware queue index. The queue IRQ definitions in the
2465                  * device tree must remove the optional gaps that could exist
2466                  * in the hardware queue mask.
2467                  */
2468                 queue->irq = platform_get_irq(pdev, q);
2469                 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2470                                        IRQF_SHARED, dev->name, queue);
2471                 if (err) {
2472                         dev_err(&pdev->dev,
2473                                 "Unable to request IRQ %d (error %d)\n",
2474                                 queue->irq, err);
2475                         return err;
2476                 }
2477 
2478                 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2479                 q++;
2480         }
2481 
2482         dev->netdev_ops = &macb_netdev_ops;
2483         netif_napi_add(dev, &bp->napi, macb_poll, 64);
2484 
2485         /* set up the appropriate routines according to the adapter type */
2486         if (macb_is_gem(bp)) {
2487                 bp->max_tx_length = GEM_MAX_TX_LEN;
2488                 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2489                 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2490                 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2491                 bp->macbgem_ops.mog_rx = gem_rx;
2492                 dev->ethtool_ops = &gem_ethtool_ops;
2493         } else {
2494                 bp->max_tx_length = MACB_MAX_TX_LEN;
2495                 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2496                 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2497                 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2498                 bp->macbgem_ops.mog_rx = macb_rx;
2499                 dev->ethtool_ops = &macb_ethtool_ops;
2500         }
2501 
2502         /* Set features */
2503         dev->hw_features = NETIF_F_SG;
2504         /* Checksum offload is only available on gem with packet buffer */
2505         if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2506                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2507         if (bp->caps & MACB_CAPS_SG_DISABLED)
2508                 dev->hw_features &= ~NETIF_F_SG;
2509         dev->features = dev->hw_features;
2510 
2511         if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2512                 val = 0;
2513                 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2514                         val = GEM_BIT(RGMII);
2515                 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2516                          (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2517                         val = MACB_BIT(RMII);
2518                 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
2519                         val = MACB_BIT(MII);
2520 
2521                 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2522                         val |= MACB_BIT(CLKEN);
2523 
2524                 macb_or_gem_writel(bp, USRIO, val);
2525         }
2526 
2527         /* Set MII management clock divider */
2528         val = macb_mdc_clk_div(bp);
2529         val |= macb_dbw(bp);
2530         if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2531                 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2532         macb_writel(bp, NCFGR, val);
2533 
2534         return 0;
2535 }
2536 
2537 #if defined(CONFIG_OF)
2538 /* 1518 rounded up */
2539 #define AT91ETHER_MAX_RBUFF_SZ  0x600
2540 /* max number of receive buffers */
2541 #define AT91ETHER_MAX_RX_DESCR  9
2542 
2543 /* Initialize and start the Receiver and Transmit subsystems */
2544 static int at91ether_start(struct net_device *dev)
2545 {
2546         struct macb *lp = netdev_priv(dev);
2547         dma_addr_t addr;
2548         u32 ctl;
2549         int i;
2550 
2551         lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2552                                          (AT91ETHER_MAX_RX_DESCR *
2553                                           sizeof(struct macb_dma_desc)),
2554                                          &lp->rx_ring_dma, GFP_KERNEL);
2555         if (!lp->rx_ring)
2556                 return -ENOMEM;
2557 
2558         lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2559                                             AT91ETHER_MAX_RX_DESCR *
2560                                             AT91ETHER_MAX_RBUFF_SZ,
2561                                             &lp->rx_buffers_dma, GFP_KERNEL);
2562         if (!lp->rx_buffers) {
2563                 dma_free_coherent(&lp->pdev->dev,
2564                                   AT91ETHER_MAX_RX_DESCR *
2565                                   sizeof(struct macb_dma_desc),
2566                                   lp->rx_ring, lp->rx_ring_dma);
2567                 lp->rx_ring = NULL;
2568                 return -ENOMEM;
2569         }
2570 
2571         addr = lp->rx_buffers_dma;
2572         for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2573                 lp->rx_ring[i].addr = addr;
2574                 lp->rx_ring[i].ctrl = 0;
2575                 addr += AT91ETHER_MAX_RBUFF_SZ;
2576         }
2577 
2578         /* Set the Wrap bit on the last descriptor */
2579         lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2580 
2581         /* Reset buffer index */
2582         lp->rx_tail = 0;
2583 
2584         /* Program address of descriptor list in Rx Buffer Queue register */
2585         macb_writel(lp, RBQP, lp->rx_ring_dma);
2586 
2587         /* Enable Receive and Transmit */
2588         ctl = macb_readl(lp, NCR);
2589         macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2590 
2591         return 0;
2592 }
2593 
2594 /* Open the ethernet interface */
2595 static int at91ether_open(struct net_device *dev)
2596 {
2597         struct macb *lp = netdev_priv(dev);
2598         u32 ctl;
2599         int ret;
2600 
2601         /* Clear internal statistics */
2602         ctl = macb_readl(lp, NCR);
2603         macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2604 
2605         macb_set_hwaddr(lp);
2606 
2607         ret = at91ether_start(dev);
2608         if (ret)
2609                 return ret;
2610 
2611         /* Enable MAC interrupts */
2612         macb_writel(lp, IER, MACB_BIT(RCOMP)    |
2613                              MACB_BIT(RXUBR)    |
2614                              MACB_BIT(ISR_TUND) |
2615                              MACB_BIT(ISR_RLE)  |
2616                              MACB_BIT(TCOMP)    |
2617                              MACB_BIT(ISR_ROVR) |
2618                              MACB_BIT(HRESP));
2619 
2620         /* schedule a link state check */
2621         phy_start(dev->phydev);
2622 
2623         netif_start_queue(dev);
2624 
2625         return 0;
2626 }
2627 
2628 /* Close the interface */
2629 static int at91ether_close(struct net_device *dev)
2630 {
2631         struct macb *lp = netdev_priv(dev);
2632         u32 ctl;
2633 
2634         /* Disable Receiver and Transmitter */
2635         ctl = macb_readl(lp, NCR);
2636         macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2637 
2638         /* Disable MAC interrupts */
2639         macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
2640                              MACB_BIT(RXUBR)    |
2641                              MACB_BIT(ISR_TUND) |
2642                              MACB_BIT(ISR_RLE)  |
2643                              MACB_BIT(TCOMP)    |
2644                              MACB_BIT(ISR_ROVR) |
2645                              MACB_BIT(HRESP));
2646 
2647         netif_stop_queue(dev);
2648 
2649         dma_free_coherent(&lp->pdev->dev,
2650                           AT91ETHER_MAX_RX_DESCR *
2651                           sizeof(struct macb_dma_desc),
2652                           lp->rx_ring, lp->rx_ring_dma);
2653         lp->rx_ring = NULL;
2654 
2655         dma_free_coherent(&lp->pdev->dev,
2656                           AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2657                           lp->rx_buffers, lp->rx_buffers_dma);
2658         lp->rx_buffers = NULL;
2659 
2660         return 0;
2661 }
2662 
2663 /* Transmit packet */
2664 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2665 {
2666         struct macb *lp = netdev_priv(dev);
2667 
2668         if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2669                 netif_stop_queue(dev);
2670 
2671                 /* Store packet information (to free when Tx completed) */
2672                 lp->skb = skb;
2673                 lp->skb_length = skb->len;
2674                 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
2675                                                   skb->len, DMA_TO_DEVICE);
2676                 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
2677                         dev_kfree_skb_any(skb);
2678                         dev->stats.tx_dropped++;
2679                         netdev_err(dev, "%s: DMA mapping error\n", __func__);
2680                         return NETDEV_TX_OK;
2681                 }
2682 
2683                 /* Set address of the data in the Transmit Address register */
2684                 macb_writel(lp, TAR, lp->skb_physaddr);
2685                 /* Set length of the packet in the Transmit Control register */
2686                 macb_writel(lp, TCR, skb->len);
2687 
2688         } else {
2689                 netdev_err(dev, "%s called, but device is busy!\n", __func__);
2690                 return NETDEV_TX_BUSY;
2691         }
2692 
2693         return NETDEV_TX_OK;
2694 }
2695 
2696 /* Extract received frames from the buffer descriptors and send them to the
2697  * upper layers. (Called from interrupt context.)
2698  */
2699 static void at91ether_rx(struct net_device *dev)
2700 {
2701         struct macb *lp = netdev_priv(dev);
2702         unsigned char *p_recv;
2703         struct sk_buff *skb;
2704         unsigned int pktlen;
2705 
2706         while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2707                 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2708                 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2709                 skb = netdev_alloc_skb(dev, pktlen + 2);
2710                 if (skb) {
2711                         skb_reserve(skb, 2);
2712                         memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2713 
2714                         skb->protocol = eth_type_trans(skb, dev);
2715                         lp->stats.rx_packets++;
2716                         lp->stats.rx_bytes += pktlen;
2717                         netif_rx(skb);
2718                 } else {
2719                         lp->stats.rx_dropped++;
2720                 }
2721 
2722                 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2723                         lp->stats.multicast++;
2724 
2725                 /* reset ownership bit */
2726                 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2727 
2728                 /* wrap after last buffer */
2729                 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2730                         lp->rx_tail = 0;
2731                 else
2732                         lp->rx_tail++;
2733         }
2734 }
2735 
2736 /* MAC interrupt handler */
2737 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2738 {
2739         struct net_device *dev = dev_id;
2740         struct macb *lp = netdev_priv(dev);
2741         u32 intstatus, ctl;
2742 
2743         /* MAC Interrupt Status register indicates what interrupts are pending.
2744          * It is automatically cleared once read.
2745          */
2746         intstatus = macb_readl(lp, ISR);
2747 
2748         /* Receive complete */
2749         if (intstatus & MACB_BIT(RCOMP))
2750                 at91ether_rx(dev);
2751 
2752         /* Transmit complete */
2753         if (intstatus & MACB_BIT(TCOMP)) {
2754                 /* The TCOMP bit is set even if the transmission failed */
2755                 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2756                         lp->stats.tx_errors++;
2757 
2758                 if (lp->skb) {
2759                         dev_kfree_skb_irq(lp->skb);
2760                         lp->skb = NULL;
2761                         dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
2762                                          lp->skb_length, DMA_TO_DEVICE);
2763                         lp->stats.tx_packets++;
2764                         lp->stats.tx_bytes += lp->skb_length;
2765                 }
2766                 netif_wake_queue(dev);
2767         }
2768 
2769         /* Work-around for EMAC Errata section 41.3.1 */
2770         if (intstatus & MACB_BIT(RXUBR)) {
2771                 ctl = macb_readl(lp, NCR);
2772                 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2773                 wmb();
2774                 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2775         }
2776 
2777         if (intstatus & MACB_BIT(ISR_ROVR))
2778                 netdev_err(dev, "ROVR error\n");
2779 
2780         return IRQ_HANDLED;
2781 }
2782 
2783 #ifdef CONFIG_NET_POLL_CONTROLLER
2784 static void at91ether_poll_controller(struct net_device *dev)
2785 {
2786         unsigned long flags;
2787 
2788         local_irq_save(flags);
2789         at91ether_interrupt(dev->irq, dev);
2790         local_irq_restore(flags);
2791 }
2792 #endif
2793 
2794 static const struct net_device_ops at91ether_netdev_ops = {
2795         .ndo_open               = at91ether_open,
2796         .ndo_stop               = at91ether_close,
2797         .ndo_start_xmit         = at91ether_start_xmit,
2798         .ndo_get_stats          = macb_get_stats,
2799         .ndo_set_rx_mode        = macb_set_rx_mode,
2800         .ndo_set_mac_address    = eth_mac_addr,
2801         .ndo_do_ioctl           = macb_ioctl,
2802         .ndo_validate_addr      = eth_validate_addr,
2803         .ndo_change_mtu         = eth_change_mtu,
2804 #ifdef CONFIG_NET_POLL_CONTROLLER
2805         .ndo_poll_controller    = at91ether_poll_controller,
2806 #endif
2807 };
2808 
2809 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2810                               struct clk **hclk, struct clk **tx_clk,
2811                               struct clk **rx_clk)
2812 {
2813         int err;
2814 
2815         *hclk = NULL;
2816         *tx_clk = NULL;
2817         *rx_clk = NULL;
2818 
2819         *pclk = devm_clk_get(&pdev->dev, "ether_clk");
2820         if (IS_ERR(*pclk))
2821                 return PTR_ERR(*pclk);
2822 
2823         err = clk_prepare_enable(*pclk);
2824         if (err) {
2825                 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2826                 return err;
2827         }
2828 
2829         return 0;
2830 }
2831 
2832 static int at91ether_init(struct platform_device *pdev)
2833 {
2834         struct net_device *dev = platform_get_drvdata(pdev);
2835         struct macb *bp = netdev_priv(dev);
2836         int err;
2837         u32 reg;
2838 
2839         dev->netdev_ops = &at91ether_netdev_ops;
2840         dev->ethtool_ops = &macb_ethtool_ops;
2841 
2842         err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2843                                0, dev->name, dev);
2844         if (err)
2845                 return err;
2846 
2847         macb_writel(bp, NCR, 0);
2848 
2849         reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2850         if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2851                 reg |= MACB_BIT(RM9200_RMII);
2852 
2853         macb_writel(bp, NCFGR, reg);
2854 
2855         return 0;
2856 }
2857 
2858 static const struct macb_config at91sam9260_config = {
2859         .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2860         .clk_init = macb_clk_init,
2861         .init = macb_init,
2862 };
2863 
2864 static const struct macb_config pc302gem_config = {
2865         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2866         .dma_burst_length = 16,
2867         .clk_init = macb_clk_init,
2868         .init = macb_init,
2869 };
2870 
2871 static const struct macb_config sama5d2_config = {
2872         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2873         .dma_burst_length = 16,
2874         .clk_init = macb_clk_init,
2875         .init = macb_init,
2876 };
2877 
2878 static const struct macb_config sama5d3_config = {
2879         .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
2880               | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2881         .dma_burst_length = 16,
2882         .clk_init = macb_clk_init,
2883         .init = macb_init,
2884 };
2885 
2886 static const struct macb_config sama5d4_config = {
2887         .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2888         .dma_burst_length = 4,
2889         .clk_init = macb_clk_init,
2890         .init = macb_init,
2891 };
2892 
2893 static const struct macb_config emac_config = {
2894         .clk_init = at91ether_clk_init,
2895         .init = at91ether_init,
2896 };
2897 
2898 static const struct macb_config np4_config = {
2899         .caps = MACB_CAPS_USRIO_DISABLED,
2900         .clk_init = macb_clk_init,
2901         .init = macb_init,
2902 };
2903 
2904 static const struct macb_config zynqmp_config = {
2905         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
2906         .dma_burst_length = 16,
2907         .clk_init = macb_clk_init,
2908         .init = macb_init,
2909         .jumbo_max_len = 10240,
2910 };

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
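
/* Illustrative only (addresses, interrupt specifier and clock phandles are
 * invented for this note): a device tree node that would bind against the
 * "cdns,zynqmp-gem" entry above, selecting zynqmp_config through
 * match->data in macb_probe():
 *
 *	ethernet@ff0b0000 {
 *		compatible = "cdns,zynqmp-gem";
 *		reg = <0xff0b0000 0x1000>;
 *		interrupts = <0 57 4>;
 *		clocks = <&pclk>, <&hclk>, <&tx_clk>;
 *		clock-names = "pclk", "hclk", "tx_clk";
 *		phy-mode = "rgmii-id";
 *		local-mac-address = [00 0a 35 00 00 01];
 *	};
 */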

static int macb_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_clk_init;
	int (*init)(struct platform_device *) = macb_init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	const struct macb_config *macb_config = NULL;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

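	/* Note added here (not in the original): the clk_init hook resolved
	 * above has, on success, prepared and enabled pclk/hclk and any
	 * optional tx_clk/rx_clk, so every failure from this point on must
	 * unwind through err_disable_clocks.
	 */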
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
#endif
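	/* Added note: on GEMs whose DCFG1 data bus width field reports more
	 * than 32 bits, the driver assumes the hardware's 44-bit DMA address
	 * reach, hence DMA_BIT_MASK(44) above rather than a full 64-bit mask.
	 */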

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);
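
	/* Added commentary: a MAC address from the device tree wins, e.g.
	 *
	 *	local-mac-address = [00 04 25 12 34 56];
	 *
	 * (example bytes only); otherwise macb_get_hwaddr() reads back
	 * whatever a boot loader left in the controller's specific-address
	 * registers, falling back to a random address if nothing valid is
	 * found.
	 */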

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);
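
	/* Example only (hypothetical GPIO controller and pin): a PHY child
	 * node carrying the reset line that the block above drives high at
	 * probe time:
	 *
	 *	ethernet-phy@1 {
	 *		reg = <1>;
	 *		reset-gpios = <&pioE 6 GPIO_ACTIVE_HIGH>;
	 *	};
	 */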

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}
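
	/* Added note: of_get_phy_mode() parses the node's phy-mode /
	 * phy-connection-type string (e.g. phy-mode = "rmii";) into a
	 * PHY_INTERFACE_MODE_* value; only when the property is absent does
	 * the driver consult legacy platform data, defaulting to MII.
	 */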

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}
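
/* Added usage note: MACB_WOL_ENABLED is set through the driver's ethtool WoL
 * handler, so magic-packet wakeup typically needs both the "magic-packet"
 * device tree property (capability) and an explicit opt-in from userspace,
 * e.g.:
 *
 *	ethtool -s eth0 wol g
 *
 * With WoL armed, macb_suspend() above keeps the clocks running and enables
 * the WOL interrupt instead of gating the block.
 */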

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
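
/* Added note: SIMPLE_DEV_PM_OPS only wires macb_suspend/macb_resume up as
 * system sleep callbacks when CONFIG_PM_SLEEP is enabled, which is why both
 * functions are marked __maybe_unused.
 */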

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");